diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..83d5fd2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,26 @@ +.DS_Store +.bloop +.bsp +.bsp/ +.history +.metals +.vscode/ +/*.iml +/.classpath +/.idea +/.idea_modules +/.project +/.settings +/RUNNING_PID +/bin/ +/out +conf/felix-cache +dist +felix-cache +js/node_modules +logs +project/target +shared/.js +shared/.jvm +target +tmp diff --git a/README.md b/README.md index c6e0bd8..e5c4fc6 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,24 @@ -# modules -Description +# Development + +## IntelliJ + +Open Preferences -> Build Tools -> sbt and enable "Use sbt shell" for project reloads and builds. + +## Build JavaScript Libraries + +``` +fastOptJS::webpack +``` + +## Run Web Server + +``` +python -m SimpleHTTPServer +``` + +## Compile Cycle + +``` +~compile +``` + \ No newline at end of file diff --git a/airbyte.jq b/airbyte.jq new file mode 100644 index 0000000..7c0038b --- /dev/null +++ b/airbyte.jq @@ -0,0 +1,35 @@ +def p($p): "
<p>\($p)</p>"; + +# input: array of arrays +def row2html: + reduce .[] as $value ("<tr>"; . + "<td>\($value)</td>") + "</tr>"; + +# with style +def row2html($style): + reduce .[] as $value ("<tr>"; + . + "<td style=\($style)>\($value)</td>") + "</tr>"; + +# input: an array of arrays, the first being treated as a header row +def table2html($tablestyle; $headerstyle): + "<table style=\($tablestyle)>", + "<tbody>", + (.[0] | row2html($headerstyle)), + (.[1:][] | row2html), + "</tbody>", + "</table>
" ; + +def atomicKeys2arrays: + # emit an array of atomic keys + def atomicKeys: to_entries | map( select(.value|scalars) | .key); + (.[0] | atomicKeys) as $keys + | $keys, + (.[] | [ .[$keys[]]]); + +def tableStyle: "\"border-collapse: collapse; width: 100%;\" border=\"1\"" ; +def headerStyle: "\"text-align: center;\"" ; + +def table2html: table2html(tableStyle; headerStyle); + +def airbyteParameters: + .connectionSpecification.properties | .[] | [.title,(.type | ./"" | first |= ascii_upcase | add),.description] | join("|"); + diff --git a/airbyte.sh b/airbyte.sh new file mode 100755 index 0000000..5760ee9 --- /dev/null +++ b/airbyte.sh @@ -0,0 +1,105 @@ +#!/bin/bash +WEBFLOW_URL="https://api.webflow.com/collections/631aeff93e54dc14b555344a/items" +WEBFLOW_AUTHOR="Person_5e22e90adaefb87e2849af40" +RES="jvm/src/main/resources/airbyte" +CONNECTORS="/tmp/airbyte/airbyte-integrations/connectors" + +# rm -fr $RES +# mkdir -p $RES +# rm -fr /tmp/airbyte +# git clone --depth=1 https://github.com/airbytehq/airbyte.git /tmp/airbyte + +# # Remove unneeded integrations +# rm -fr $CONNECTORS/*scaffold* +# rm -fr $CONNECTORS/*test* +# rm -fr $CONNECTORS/*tutorial* +rm -fr /tmp/connectors* + +# Cache list of existing Webflow connectors +x=0 +while [ $x -le 5 ] +do + OFFSET=$(( $x * 100)) + echo "Caching connectors at offset $OFFSET" + curl -o /tmp/connectors_$x -s --request GET \ + --url "$WEBFLOW_URL?live=true&offset=$OFFSET" \ + --header "Accept-Version: 1.0.0" \ + --header "Authorization: Bearer $WEBFLOW_TOKEN" + x=$(( $x + 1 )) +done + +cat /tmp/connectors* | jq -n '{ items: [ inputs.items ] | add }' > /tmp/connectors + +# For each of the connectors +for dir in $CONNECTORS/*; do + SPEC_PATH=$(find $dir -name spec.json) + if [[ "$SPEC_PATH" == *airbyte* ]]; then + + # Get connector info + SPEC_NAME=$(echo "$SPEC_PATH" | cut -d/ -f6) + DIRECTION=(${SPEC_NAME//-/ }[1]) + TITLE=$(cat $SPEC_PATH | jq -r '.connectionSpecification.title' | sed -e 's/ Spec//g' | sed -e 's/ Destination//g' | sed -e 's/Destination //g' | sed -e 's/ Source//g' | sed -e 's/Source //g') + TITLE=$(echo $TITLE | sed -e 's/ Connection Configuration//g' | sed -e 's/ Configuration//g') + REQUIRED=$(cat $SPEC_PATH | jq -r '.connectionSpecification.required') + PARAMETERS="$(cat $SPEC_PATH | jq -r 'include "airbyte"; airbyteParameters')" + PARAMETERS_TABLE=""$(echo "$PARAMETERS" | sed -e 's#|#
NameTypeDescription
#g' | sed -e 's#^#
#g' | sed -e 's#"#\\"#g' )"
" + + WEBFLOW_DATA="{ + \"fields\": { + \"name\": \"$TITLE\", + \"slug\": \"$SPEC_NAME\", + \"direction\": \"$DIRECTION\", + \"parameters\": \"$PARAMETERS_TABLE\", + \"_archived\": false, + \"_draft\": false + } + }" + + echo $WEBFLOW_DATA > /tmp/connector_data + + # Create or update connector in Webflow + WEBFLOW_ITEM=$(cat /tmp/connectors | jq ".items | map(select(.slug == \"$SPEC_NAME\"))[0]") + if [[ $WEBFLOW_ITEM == "null" ]]; then + echo "Creating $SPEC_NAME" + curl -s --request POST \ + --url "$WEBFLOW_URL?live=true" \ + --header "Accept-Version: 1.0.0" \ + --header "Authorization: Bearer $WEBFLOW_TOKEN" \ + --header "Content-Type: application/json" \ + --data-binary "@/tmp/connector_data" + else + ITEM_ID=$(echo $WEBFLOW_ITEM | jq -r "._id") + echo "Updating $SPEC_NAME with id: $ITEM_ID" + + curl -s --request PUT \ + --url "$WEBFLOW_URL/$ITEM_ID?live=true" \ + --header "Accept-Version: 1.0.0" \ + --header "Authorization: Bearer $WEBFLOW_TOKEN" \ + --header "Content-Type: application/json" \ + --data-binary "@/tmp/connector_data" + fi + + # Copy JSON file + cp $SPEC_PATH $RES/$SPEC_NAME.json + + # Add to localisation file + KEYS=$(jq -r '[. as $in | (paths(scalars), paths((. | length == 0)?)) | join(".") as $key | $key + "=" + ($in | getpath($key | split(".") | map((. | tonumber)? // .)) | tostring) ] | sort | .[]' $SPEC_PATH) + + TITLES=$(echo "$KEYS" | grep "title=" | grep properties | sed -e "s/connectionSpecification.properties/$SPEC_NAME/g") + S_TITLES=$(echo "$TITLES" | sed -e "s/source-/datasources.section.source-/g") + D_TITLES=$(echo "$S_TITLES" | sed -e "s/destination-/datasources.section.destination-/g") + echo "$D_TITLES" | grep -v "connectionSpecification" | grep -v "scaffold-" | sed -e '/^$/d' >> "$RES/messages_airbyte_en" + + DESCRIPTIONS=$(echo "$KEYS" | grep "description=" | grep properties | sed -e "s/connectionSpecification.properties/$SPEC_NAME/g") + S_DESCRIPTIONS=$(echo "$DESCRIPTIONS" | sed -e "s/source-/datasources.section.source-/g") + D_DESCRIPTIONS=$(echo "$S_DESCRIPTIONS" | sed -e "s/destination-/datasources.section.destination-/g") + echo "$D_DESCRIPTIONS" | grep -v "connectionSpecification" | grep -v "scaffold-" | sed -e '/^$/d' >> "$RES/messages_airbyte_en" + fi +done + +# echo "Removing references to Airbyte .." 
+# sed -i '' 's/airbyte.io/harana.com/g' "$RES/messages_airbyte_en" +# sed -i '' 's/airbyte/harana/g' "$RES/messages_airbyte_en" +# sed -i '' 's/airbytehq/harana/g' "$RES/messages_airbyte_en" +# sed -i '' 's/Airbyte/harana/g' "$RES/messages_airbyte_en" +# sed -i '' 's/ (Optional)//g' "$RES/messages_airbyte_en" \ No newline at end of file diff --git a/build.sbt b/build.sbt new file mode 100755 index 0000000..4c6811e --- /dev/null +++ b/build.sbt @@ -0,0 +1,75 @@ +import sbt._ +import com.harana.sbt.common._ + +val modules = haranaCrossProject("modules").in(file(".")) + .settings( + libraryDependencies ++= + Library.circe.value + ) + .jvmSettings( + unmanagedBase := baseDirectory.value / "lib", + libraryDependencies ++= + Library.airbyte.value ++ + Library.alluxio.value ++ + Library.awsS3v2.value ++ + Library.dockerJava.value ++ + Library.googleServiceApi.value ++ + Library.jackson.value ++ + Library.jgrapht.value ++ + Library.json4s.value ++ + Library.netty.value ++ + Library.pac4j.value ++ + Library.scala.value ++ + Library.testing.value ++ + Library.vertx.value ++ + Library.vfs.value ++ + Library.zio2.value :+ + Library.airtable.value :+ + Library.avro4s.value :+ + Library.awsJavaSes.value :+ + Library.awsScalaIam.value :+ + Library.awsScalaS3.value :+ + Library.auth0.value :+ + Library.betterFiles.value :+ + Library.calciteCore.value :+ + Library.chargebee.value :+ + Library.chimney.value :+ + Library.commonsEmail.value :+ + Library.deepstream.value :+ + Library.facebook.value :+ + Library.kryo.value :+ + Library.handlebars.value :+ + Library.jasyncfio.value :+ + Library.javaWebsocket.value :+ + Library.jbrowserDriver.value :+ + Library.jgit.value :+ + Library.jsch.value :+ + Library.jsoup.value :+ + Library.kubernetesClient.value :+ + Library.meilisearch.value :+ + Library.mixpanel.value :+ + Library.ognl.value :+ + Library.ohc.value :+ + Library.playJsonExtensions.value :+ + Library.pureCsv.value :+ + Library.redisson.value :+ + Library.segment.value :+ + Library.sentry.value :+ + Library.shopify.value :+ + Library.skuber.value :+ + Library.sshj.value :+ + Library.siteCrawler.value :+ + Library.snappy.value :+ + Library.slack.value :+ + Library.stripe.value :+ + Library.sundial.value :+ + Library.thumbnailator.value :+ + Library.unboundid.value :+ + Library.youiClient.value :+ + Library.zendeskClient.value :+ + Library.zip4j.value :+ + Library.zstd.value :+ + Library.ztZip.value :+ + "com.harana" %%% "modules-core" % "1.0.0" :+ + "com.harana" %%% "sdk" % "1.0.0" + ) diff --git a/js/package-lock.json b/js/package-lock.json new file mode 100644 index 0000000..f6eb3bf --- /dev/null +++ b/js/package-lock.json @@ -0,0 +1,26 @@ +{ + "name": "id-jwt", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "id-jwt", + "devDependencies": { + "typescript": "^5.1.6" + } + }, + "node_modules/typescript": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.6.tgz", + "integrity": "sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==", + "dev": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + } + } +} diff --git a/js/package.json b/js/package.json new file mode 100644 index 0000000..cd85bed --- /dev/null +++ b/js/package.json @@ -0,0 +1,12 @@ +{ + "name": "id-jwt", + "type": "module", + "private": true, + "scripts": { + "dev": "vite", + "build": "vite build" + }, + "devDependencies": { + "typescript": "^5.1.6" + } +} diff --git 
a/js/pnpm-lock.yaml b/js/pnpm-lock.yaml new file mode 100644 index 0000000..b17cc7f --- /dev/null +++ b/js/pnpm-lock.yaml @@ -0,0 +1,18 @@ +lockfileVersion: '6.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +devDependencies: + typescript: + specifier: ^5.1.6 + version: 5.3.3 + +packages: + + /typescript@5.3.3: + resolution: {integrity: sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==} + engines: {node: '>=14.17'} + hasBin: true + dev: true diff --git a/js/yarn.lock b/js/yarn.lock new file mode 100644 index 0000000..8a152c6 --- /dev/null +++ b/js/yarn.lock @@ -0,0 +1,8 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. +# yarn lockfile v1 + + +typescript@^5.1.6: + version "5.1.6" + resolved "https://registry.npmjs.org/typescript/-/typescript-5.1.6.tgz" + integrity sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA== diff --git a/jvm/lib/io.airbyte-airbyte-api-0.39.41-alpha.jar b/jvm/lib/io.airbyte-airbyte-api-0.39.41-alpha.jar new file mode 100644 index 0000000..f7c7841 Binary files /dev/null and b/jvm/lib/io.airbyte-airbyte-api-0.39.41-alpha.jar differ diff --git a/jvm/lib/io.airbyte-airbyte-protocol-0.39.41-alpha.jar b/jvm/lib/io.airbyte-airbyte-protocol-0.39.41-alpha.jar new file mode 100644 index 0000000..1b06d68 Binary files /dev/null and b/jvm/lib/io.airbyte-airbyte-protocol-0.39.41-alpha.jar differ diff --git a/jvm/lib/scala-stripe_2.13-1.1.30.jar b/jvm/lib/scala-stripe_2.13-1.1.30.jar new file mode 100644 index 0000000..dcca71d Binary files /dev/null and b/jvm/lib/scala-stripe_2.13-1.1.30.jar differ diff --git a/jvm/src/main/resources/airbyte/destination-amazon-sqs.json b/jvm/src/main/resources/airbyte/destination-amazon-sqs.json new file mode 100644 index 0000000..415cbaf --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-amazon-sqs.json @@ -0,0 +1,95 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/amazon-sqs", + "supported_destination_sync_modes": ["append"], + "supportsIncremental": true, + "supportsDBT": false, + "supportsNormalization": false, + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Destination Amazon Sqs", + "type": "object", + "required": ["queue_url", "region"], + "additionalProperties": false, + "properties": { + "queue_url": { + "title": "Queue URL", + "description": "URL of the SQS Queue", + "type": "string", + "examples": [ + "https://sqs.eu-west-1.amazonaws.com/1234567890/my-example-queue" + ], + "order": 0 + }, + "region": { + "title": "AWS Region", + "description": "AWS Region of the SQS Queue", + "type": "string", + "enum": [ + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "cn-north-1", + "cn-northwest-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "sa-east-1", + "me-south-1", + "us-gov-east-1", + "us-gov-west-1" + ], + "order": 1 + }, + "message_delay": { + "title": "Message Delay", + "description": "Modify the Message Delay of the individual message from the Queue's default (seconds).", + "type": "integer", + "examples": ["15"], + "order": 2 + }, + "access_key": { + "title": "AWS IAM Access Key ID", + "description": "The Access Key ID of the AWS IAM Role to use for sending messages", + "type": 
"string", + "examples": ["xxxxxHRNxxx3TBxxxxxx"], + "order": 3, + "airbyte_secret": true + }, + "secret_key": { + "title": "AWS IAM Secret Key", + "description": "The Secret Key of the AWS IAM Role to use for sending messages", + "type": "string", + "examples": ["hu+qE5exxxxT6o/ZrKsxxxxxxBhxxXLexxxxxVKz"], + "order": 4, + "airbyte_secret": true + }, + "message_body_key": { + "title": "Message Body Key", + "description": "Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.", + "type": "string", + "examples": ["myDataPath"], + "order": 5 + }, + "message_group_id": { + "title": "Message Group Id", + "description": "The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.", + "type": "string", + "examples": ["my-fifo-group"], + "order": 6 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-aws-datalake.json b/jvm/src/main/resources/airbyte/destination-aws-datalake.json new file mode 100644 index 0000000..b5b876c --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-aws-datalake.json @@ -0,0 +1,107 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/aws-datalake", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "AWS Datalake Destination Spec", + "type": "object", + "required": ["credentials", "region", "bucket_name", "bucket_prefix"], + "additionalProperties": false, + "properties": { + "aws_account_id": { + "type": "string", + "title": "AWS Account Id", + "description": "target aws account id", + "examples": ["111111111111"] + }, + "region": { + "title": "AWS Region", + "type": "string", + "description": "Region name", + "airbyte_secret": false + }, + "credentials": { + "title": "Authentication mode", + "description": "Choose How to Authenticate to AWS.", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "IAM Role", + "required": ["role_arn", "credentials_title"], + "properties": { + "credentials_title": { + "type": "string", + "title": "Credentials Title", + "description": "Name of the credentials", + "const": "IAM Role", + "enum": ["IAM Role"], + "default": "IAM Role", + "order": 0 + }, + "role_arn": { + "title": "Target Role Arn", + "type": "string", + "description": "Will assume this role to write data to s3", + "airbyte_secret": false + } + } + }, + { + "type": "object", + "title": "IAM User", + "required": [ + "credentials_title", + "aws_access_key_id", + "aws_secret_access_key" + ], + "properties": { + "credentials_title": { + "type": "string", + "title": "Credentials Title", + "description": "Name of the credentials", + "const": "IAM User", + "enum": ["IAM User"], + "default": "IAM User", + "order": 0 + }, + "aws_access_key_id": { + "title": "Access Key Id", + "type": "string", + "description": "AWS User Access Key Id", + "airbyte_secret": true + }, + "aws_secret_access_key": { + "title": "Secret Access Key", + "type": "string", + "description": "Secret Access Key", + "airbyte_secret": true + } + } + } + ] + }, + "bucket_name": { + "title": "S3 Bucket Name", + "type": "string", + "description": "Name of the bucket", + "airbyte_secret": false + }, + "bucket_prefix": { + "title": "Target S3 
Bucket Prefix", + "type": "string", + "description": "S3 prefix", + "airbyte_secret": false + }, + "lakeformation_database_name": { + "title": "Lakeformation Database Name", + "type": "string", + "description": "Which database to use", + "airbyte_secret": false + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-azure-blob-storage.json b/jvm/src/main/resources/airbyte/destination-azure-blob-storage.json new file mode 100644 index 0000000..d07ea2a --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-azure-blob-storage.json @@ -0,0 +1,91 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/azureblobstorage", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "AzureBlobStorage Destination Spec", + "type": "object", + "required": [ + "azure_blob_storage_account_name", + "azure_blob_storage_account_key", + "format" + ], + "additionalProperties": false, + "properties": { + "azure_blob_storage_endpoint_domain_name": { + "title": "Endpoint Domain Name", + "type": "string", + "default": "blob.core.windows.net", + "description": "This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example.", + "examples": ["blob.core.windows.net"] + }, + "azure_blob_storage_container_name": { + "title": "Azure blob storage container (Bucket) Name", + "type": "string", + "description": "The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp", + "examples": ["airbytetescontainername"] + }, + "azure_blob_storage_account_name": { + "title": "Azure Blob Storage account name", + "type": "string", + "description": "The account's name of the Azure Blob Storage.", + "examples": ["airbyte5storage"] + }, + "azure_blob_storage_account_key": { + "title": "Azure Blob Storage account key", + "description": "The Azure blob storage account key.", + "airbyte_secret": true, + "type": "string", + "examples": [ + "Z8ZkZpteggFx394vm+PJHnGTvdRncaYS+JhLKdj789YNmD+iyGTnG+PV+POiuYNhBg/ACS+LKjd%4FG3FHGN12Nd==" + ] + }, + "azure_blob_storage_output_buffer_size": { + "title": "Azure Blob Storage output buffer size (Megabytes)", + "type": "integer", + "description": "The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.", + "minimum": 1, + "maximum": 2047, + "default": 5, + "examples": [5] + }, + "format": { + "title": "Output Format", + "type": "object", + "description": "Output data format", + "oneOf": [ + { + "title": "CSV: Comma-Separated Values", + "required": ["format_type", "flattening"], + "properties": { + "format_type": { + "type": "string", + "const": "CSV" + }, + "flattening": { + "type": "string", + "title": "Normalization (Flattening)", + "description": "Whether the input json data should be normalized (flattened) in the output CSV. 
Please refer to docs for details.", + "default": "No flattening", + "enum": ["No flattening", "Root level flattening"] + } + } + }, + { + "title": "JSON Lines: newline-delimited JSON", + "required": ["format_type"], + "properties": { + "format_type": { + "type": "string", + "const": "JSONL" + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-bigquery-denormalized.json b/jvm/src/main/resources/airbyte/destination-bigquery-denormalized.json new file mode 100644 index 0000000..293424c --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-bigquery-denormalized.json @@ -0,0 +1,187 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/bigquery", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": true, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "BigQuery Denormalized Typed Struct Destination Spec", + "type": "object", + "required": ["project_id", "dataset_id"], + "additionalProperties": true, + "properties": { + "project_id": { + "type": "string", + "description": "The GCP project ID for the project containing the target BigQuery dataset. Read more here.", + "title": "Project ID", + "order": 0 + }, + "dataset_id": { + "type": "string", + "description": "The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.", + "title": "Default Dataset ID", + "order": 1 + }, + "loading_method": { + "type": "object", + "title": "Loading Method *", + "description": "Loading method used to send select the way data will be uploaded to BigQuery.
Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.", + "order": 2, + "oneOf": [ + { + "title": "Standard Inserts", + "required": ["method"], + "properties": { + "method": { + "type": "string", + "const": "Standard" + } + } + }, + { + "title": "GCS Staging", + "type": "object", + "required": [ + "method", + "gcs_bucket_name", + "gcs_bucket_path", + "credential" + ], + "properties": { + "method": { + "type": "string", + "const": "GCS Staging", + "order": 0 + }, + "credential": { + "title": "Credential", + "description": "An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.", + "type": "object", + "order": 1, + "oneOf": [ + { + "title": "HMAC key", + "order": 0, + "required": [ + "credential_type", + "hmac_key_access_id", + "hmac_key_secret" + ], + "properties": { + "credential_type": { + "type": "string", + "const": "HMAC_KEY", + "order": 0 + }, + "hmac_key_access_id": { + "type": "string", + "description": "HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.", + "title": "HMAC Key Access ID", + "airbyte_secret": true, + "examples": ["1234567890abcdefghij1234"], + "order": 1 + }, + "hmac_key_secret": { + "type": "string", + "description": "The corresponding secret for the access ID. It is a 40-character base-64 encoded string.", + "title": "HMAC Key Secret", + "airbyte_secret": true, + "examples": [ + "1234567890abcdefghij1234567890ABCDEFGHIJ" + ], + "order": 2 + } + } + } + ] + }, + "gcs_bucket_name": { + "title": "GCS Bucket Name", + "type": "string", + "description": "The name of the GCS bucket. Read more here.", + "examples": ["airbyte_sync"], + "order": 2 + }, + "gcs_bucket_path": { + "title": "GCS Bucket Path", + "description": "Directory under the GCS bucket where data will be written. Read more here.", + "type": "string", + "examples": ["data_sync/test"], + "order": 3 + }, + "keep_files_in_gcs-bucket": { + "type": "string", + "description": "This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default \"Delete all tmp files from GCS\" value is used if not set explicitly.", + "title": "GCS Tmp Files Afterward Processing (Optional)", + "default": "Delete all tmp files from GCS", + "enum": [ + "Delete all tmp files from GCS", + "Keep all tmp files in GCS" + ], + "order": 4 + } + } + } + ] + }, + "credentials_json": { + "type": "string", + "description": "The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.", + "title": "Service Account Key JSON (Required for cloud, optional for open-source)", + "airbyte_secret": true, + "order": 3 + }, + "dataset_location": { + "type": "string", + "description": "The location of the dataset. Warning: Changes made after creation will not be applied. The default \"US\" value is used if not set explicitly. 
Read more here.", + "title": "Dataset Location (Optional)", + "default": "US", + "order": 4, + "enum": [ + "US", + "EU", + "asia-east1", + "asia-east2", + "asia-northeast1", + "asia-northeast2", + "asia-northeast3", + "asia-south1", + "asia-south2", + "asia-southeast1", + "asia-southeast2", + "australia-southeast1", + "australia-southeast2", + "europe-central2", + "europe-north1", + "europe-west1", + "europe-west2", + "europe-west3", + "europe-west4", + "europe-west6", + "northamerica-northeast1", + "northamerica-northeast2", + "southamerica-east1", + "southamerica-west1", + "us-central1", + "us-east1", + "us-east4", + "us-west1", + "us-west2", + "us-west3", + "us-west4" + ] + }, + "big_query_client_buffer_size_mb": { + "title": "Google BigQuery Client Chunk Size (Optional)", + "description": "Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.", + "type": "integer", + "minimum": 1, + "maximum": 15, + "default": 15, + "examples": ["15"], + "order": 5 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-bigquery.json b/jvm/src/main/resources/airbyte/destination-bigquery.json new file mode 100644 index 0000000..1939c8e --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-bigquery.json @@ -0,0 +1,192 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/bigquery", + "supportsIncremental": true, + "supportsNormalization": true, + "supportsDBT": true, + "supported_destination_sync_modes": ["overwrite", "append", "append_dedup"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "BigQuery Destination Spec", + "type": "object", + "required": ["project_id", "dataset_location", "dataset_id"], + "additionalProperties": true, + "properties": { + "project_id": { + "type": "string", + "description": "The GCP project ID for the project containing the target BigQuery dataset. Read more here.", + "title": "Project ID", + "order": 0 + }, + "dataset_location": { + "type": "string", + "description": "The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.", + "title": "Dataset Location", + "order": 1, + "enum": [ + "US", + "EU", + "asia-east1", + "asia-east2", + "asia-northeast1", + "asia-northeast2", + "asia-northeast3", + "asia-south1", + "asia-south2", + "asia-southeast1", + "asia-southeast2", + "australia-southeast1", + "australia-southeast2", + "europe-central2", + "europe-north1", + "europe-west1", + "europe-west2", + "europe-west3", + "europe-west4", + "europe-west6", + "northamerica-northeast1", + "northamerica-northeast2", + "southamerica-east1", + "southamerica-west1", + "us-central1", + "us-east1", + "us-east4", + "us-west1", + "us-west2", + "us-west3", + "us-west4" + ] + }, + "dataset_id": { + "type": "string", + "description": "The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.", + "title": "Default Dataset ID", + "order": 2 + }, + "loading_method": { + "type": "object", + "title": "Loading Method", + "description": "Loading method used to send select the way data will be uploaded to BigQuery.
Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.", + "order": 3, + "oneOf": [ + { + "title": "Standard Inserts", + "required": ["method"], + "properties": { + "method": { + "type": "string", + "const": "Standard" + } + } + }, + { + "title": "GCS Staging", + "required": [ + "method", + "gcs_bucket_name", + "gcs_bucket_path", + "credential" + ], + "properties": { + "method": { + "type": "string", + "const": "GCS Staging", + "order": 0 + }, + "credential": { + "title": "Credential", + "description": "An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.", + "type": "object", + "order": 1, + "oneOf": [ + { + "title": "HMAC key", + "required": [ + "credential_type", + "hmac_key_access_id", + "hmac_key_secret" + ], + "properties": { + "credential_type": { + "type": "string", + "const": "HMAC_KEY", + "order": 0 + }, + "hmac_key_access_id": { + "type": "string", + "description": "HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.", + "title": "HMAC Key Access ID", + "airbyte_secret": true, + "examples": ["1234567890abcdefghij1234"], + "order": 1 + }, + "hmac_key_secret": { + "type": "string", + "description": "The corresponding secret for the access ID. It is a 40-character base-64 encoded string.", + "title": "HMAC Key Secret", + "airbyte_secret": true, + "examples": [ + "1234567890abcdefghij1234567890ABCDEFGHIJ" + ], + "order": 2 + } + } + } + ] + }, + "gcs_bucket_name": { + "title": "GCS Bucket Name", + "type": "string", + "description": "The name of the GCS bucket. Read more here.", + "examples": ["airbyte_sync"], + "order": 2 + }, + "gcs_bucket_path": { + "title": "GCS Bucket Path", + "description": "Directory under the GCS bucket where data will be written.", + "type": "string", + "examples": ["data_sync/test"], + "order": 3 + }, + "keep_files_in_gcs-bucket": { + "type": "string", + "description": "This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default \"Delete all tmp files from GCS\" value is used if not set explicitly.", + "title": "GCS Tmp Files Afterward Processing (Optional)", + "default": "Delete all tmp files from GCS", + "enum": [ + "Delete all tmp files from GCS", + "Keep all tmp files in GCS" + ], + "order": 4 + } + } + } + ] + }, + "credentials_json": { + "type": "string", + "description": "The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.", + "title": "Service Account Key JSON (Required for cloud, optional for open-source)", + "airbyte_secret": true, + "order": 4 + }, + "transformation_priority": { + "type": "string", + "description": "Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. 
The default \"interactive\" value is used if not set explicitly.", + "title": "Transformation Query Run Type (Optional)", + "default": "interactive", + "enum": ["interactive", "batch"], + "order": 5 + }, + "big_query_client_buffer_size_mb": { + "title": "Google BigQuery Client Chunk Size (Optional)", + "description": "Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.", + "type": "integer", + "minimum": 1, + "maximum": 15, + "default": 15, + "examples": ["15"], + "order": 6 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-cassandra.json b/jvm/src/main/resources/airbyte/destination-cassandra.json new file mode 100644 index 0000000..61e3c0a --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-cassandra.json @@ -0,0 +1,65 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/cassandra", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Cassandra Destination Spec", + "type": "object", + "required": ["keyspace", "username", "password", "address", "port"], + "additionalProperties": true, + "properties": { + "keyspace": { + "title": "Keyspace", + "description": "Default Cassandra keyspace to create data in.", + "type": "string", + "order": 0 + }, + "username": { + "title": "Username", + "description": "Username to use to access Cassandra.", + "type": "string", + "order": 1 + }, + "password": { + "title": "Password", + "description": "Password associated with Cassandra.", + "type": "string", + "airbyte_secret": true, + "order": 2 + }, + "address": { + "title": "Address", + "description": "Address to connect to.", + "type": "string", + "examples": ["localhost,127.0.0.1"], + "order": 3 + }, + "port": { + "title": "Port", + "description": "Port of Cassandra.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 9042, + "order": 4 + }, + "datacenter": { + "title": "Datacenter", + "description": "Datacenter of the cassandra cluster.", + "type": "string", + "default": "datacenter1", + "order": 5 + }, + "replication": { + "title": "Replication factor", + "type": "integer", + "description": "Indicates to how many nodes the data should be replicated to.", + "default": 1, + "order": 6 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-clickhouse.json b/jvm/src/main/resources/airbyte/destination-clickhouse.json new file mode 100644 index 0000000..adf44db --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-clickhouse.json @@ -0,0 +1,74 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/clickhouse", + "supportsIncremental": true, + "supportsNormalization": true, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append", "append_dedup"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ClickHouse Destination Spec", + "type": "object", + "required": ["host", "port", "database", "username"], + "additionalProperties": true, + "properties": { + "host": { + "title": "Host", + "description": "Hostname of the database.", + "type": "string", + "order": 0 + }, + "port": { + 
"title": "Port", + "description": "JDBC port (not the native port) of the database.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 8123, + "examples": ["8123"], + "order": 1 + }, + "tcp-port": { + "title": "Native Port", + "description": "Native port (not the JDBC) of the database.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 9000, + "examples": ["9000"], + "order": 2 + }, + "database": { + "title": "DB Name", + "description": "Name of the database.", + "type": "string", + "order": 3 + }, + "username": { + "title": "User", + "description": "Username to use to access the database.", + "type": "string", + "order": 4 + }, + "password": { + "title": "Password", + "description": "Password associated with the username.", + "type": "string", + "airbyte_secret": true, + "order": 5 + }, + "jdbc_url_params": { + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).", + "title": "JDBC URL Params", + "type": "string", + "order": 6 + }, + "ssl": { + "title": "SSL Connection", + "description": "Encrypt data using SSL.", + "type": "boolean", + "default": false, + "order": 7 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-csv.json b/jvm/src/main/resources/airbyte/destination-csv.json new file mode 100644 index 0000000..389af67 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-csv.json @@ -0,0 +1,21 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/local-csv", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "CSV Destination Spec", + "type": "object", + "required": ["destination_path"], + "additionalProperties": false, + "properties": { + "destination_path": { + "description": "Path to the directory where csv files will be written. The destination uses the local mount \"/local\" and any data files will be placed inside that local mount. 
For more information check out our docs", + "type": "string", + "examples": ["/local"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-databricks.json b/jvm/src/main/resources/airbyte/destination-databricks.json new file mode 100644 index 0000000..1704aed --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-databricks.json @@ -0,0 +1,178 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/databricks", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Databricks Lakehouse Destination Spec", + "type": "object", + "required": [ + "accept_terms", + "databricks_server_hostname", + "databricks_http_path", + "databricks_personal_access_token", + "data_source" + ], + "properties": { + "accept_terms": { + "title": "Agree to the Databricks JDBC Driver Terms & Conditions", + "type": "boolean", + "description": "You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector.", + "default": false, + "order": 1 + }, + "databricks_server_hostname": { + "title": "Server Hostname", + "type": "string", + "description": "Databricks Cluster Server Hostname.", + "examples": ["abc-12345678-wxyz.cloud.databricks.com"], + "order": 2 + }, + "databricks_http_path": { + "title": "HTTP Path", + "type": "string", + "description": "Databricks Cluster HTTP Path.", + "examples": ["sql/protocolvx/o/1234567489/0000-1111111-abcd90"], + "order": 3 + }, + "databricks_port": { + "title": "Port", + "type": "string", + "description": "Databricks Cluster Port.", + "default": "443", + "examples": ["443"], + "order": 4 + }, + "databricks_personal_access_token": { + "title": "Access Token", + "type": "string", + "description": "Databricks Personal Access Token for making authenticated requests.", + "examples": ["dapi0123456789abcdefghij0123456789AB"], + "airbyte_secret": true, + "order": 5 + }, + "database_schema": { + "title": "Database Schema", + "type": "string", + "description": "The default schema tables are written to if the source does not specify a namespace. 
Unless specifically configured, the usual value for this field is \"public\".", + "default": "public", + "examples": ["public"], + "order": 6 + }, + "data_source": { + "title": "Data Source", + "type": "object", + "description": "Storage on which the delta lake is built.", + "oneOf": [ + { + "title": "Amazon S3", + "required": [ + "data_source_type", + "s3_bucket_name", + "s3_bucket_path", + "s3_bucket_region", + "s3_access_key_id", + "s3_secret_access_key" + ], + "properties": { + "data_source_type": { + "type": "string", + "enum": ["S3"], + "default": "S3", + "order": 1 + }, + "s3_bucket_name": { + "title": "S3 Bucket Name", + "type": "string", + "description": "The name of the S3 bucket to use for intermittent staging of the data.", + "examples": ["airbyte.staging"], + "order": 2 + }, + "s3_bucket_path": { + "title": "S3 Bucket Path", + "type": "string", + "description": "The directory under the S3 bucket where data will be written.", + "examples": ["data_sync/test"], + "order": 3 + }, + "s3_bucket_region": { + "title": "S3 Bucket Region", + "type": "string", + "default": "", + "description": "The region of the S3 staging bucket to use if utilising a copy strategy.", + "enum": [ + "", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "cn-north-1", + "cn-northwest-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "sa-east-1", + "me-south-1", + "us-gov-east-1", + "us-gov-west-1" + ], + "order": 4 + }, + "s3_access_key_id": { + "type": "string", + "description": "The Access Key Id granting allow one to access the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket.", + "title": "S3 Access Key ID", + "examples": ["A012345678910EXAMPLE"], + "airbyte_secret": true, + "order": 5 + }, + "s3_secret_access_key": { + "title": "S3 Secret Access Key", + "type": "string", + "description": "The corresponding secret to the above access key id.", + "examples": ["a012345678910ABCDEFGH/AbCdEfGhEXAMPLEKEY"], + "airbyte_secret": true, + "order": 6 + }, + "file_name_pattern": { + "type": "string", + "description": "The pattern allows you to set the file-name format for the S3 staging file(s)", + "title": "S3 Filename pattern (Optional)", + "examples": [ + "{date}", + "{date:yyyy_MM}", + "{timestamp}", + "{part_number}", + "{sync_id}" + ], + "order": 7 + } + } + } + ], + "order": 7 + }, + "purge_staging_data": { + "title": "Purge Staging Files and Tables", + "type": "boolean", + "description": "Default to 'true'. 
Switch it to 'false' for debugging purpose.", + "default": true, + "order": 8 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-dynamodb.json b/jvm/src/main/resources/airbyte/destination-dynamodb.json new file mode 100644 index 0000000..5463149 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-dynamodb.json @@ -0,0 +1,82 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/dynamodb", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "DynamoDB Destination Spec", + "type": "object", + "required": [ + "dynamodb_table_name_prefix", + "dynamodb_region", + "access_key_id", + "secret_access_key" + ], + "additionalProperties": false, + "properties": { + "dynamodb_endpoint": { + "title": "Endpoint", + "type": "string", + "default": "", + "description": "This is your DynamoDB endpoint url.(if you are working with AWS DynamoDB, just leave empty).", + "examples": ["http://localhost:9000"] + }, + "dynamodb_table_name_prefix": { + "title": "Table name prefix", + "type": "string", + "description": "The prefix to use when naming DynamoDB tables.", + "examples": ["airbyte_sync"] + }, + "dynamodb_region": { + "title": "DynamoDB Region", + "type": "string", + "default": "", + "description": "The region of the DynamoDB.", + "enum": [ + "", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "cn-north-1", + "cn-northwest-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "sa-east-1", + "me-south-1", + "us-gov-east-1", + "us-gov-west-1" + ] + }, + "access_key_id": { + "type": "string", + "description": "The access key id to access the DynamoDB. 
Airbyte requires Read and Write permissions to the DynamoDB.", + "title": "DynamoDB Key Id", + "airbyte_secret": true, + "examples": ["A012345678910EXAMPLE"] + }, + "secret_access_key": { + "type": "string", + "description": "The corresponding secret to the access key id.", + "title": "DynamoDB Access Key", + "airbyte_secret": true, + "examples": ["a012345678910ABCDEFGH/AbCdEfGhEXAMPLEKEY"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-elasticsearch.json b/jvm/src/main/resources/airbyte/destination-elasticsearch.json new file mode 100644 index 0000000..53e2eba --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-elasticsearch.json @@ -0,0 +1,93 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/elasticsearch", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsNamespaces": true, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Elasticsearch Connection Configuration", + "type": "object", + "required": ["endpoint"], + "additionalProperties": false, + "properties": { + "endpoint": { + "title": "Server Endpoint", + "type": "string", + "description": "The full url of the Elasticsearch server" + }, + "upsert": { + "type": "boolean", + "title": "Upsert Records", + "description": "If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the elasticsearch doc id. Does not support composite primary keys.", + "default": true + }, + "authenticationMethod": { + "title": "Authentication Method", + "type": "object", + "description": "The type of authentication to be used", + "oneOf": [ + { + "title": "None", + "additionalProperties": false, + "description": "No authentication will be used", + "required": ["method"], + "properties": { + "method": { + "type": "string", + "const": "none" + } + } + }, + { + "title": "Api Key/Secret", + "additionalProperties": false, + "description": "Use a api key and secret combination to authenticate", + "required": ["method", "apiKeyId", "apiKeySecret"], + "properties": { + "method": { + "type": "string", + "const": "secret" + }, + "apiKeyId": { + "title": "API Key ID", + "description": "The Key ID to used when accessing an enterprise Elasticsearch instance.", + "type": "string" + }, + "apiKeySecret": { + "title": "API Key Secret", + "description": "The secret associated with the API Key ID.", + "type": "string", + "airbyte_secret": true + } + } + }, + { + "title": "Username/Password", + "additionalProperties": false, + "description": "Basic auth header with a username and password", + "required": ["method", "username", "password"], + "properties": { + "method": { + "type": "string", + "const": "basic" + }, + "username": { + "title": "Username", + "description": "Basic auth username to access a secure Elasticsearch server", + "type": "string" + }, + "password": { + "title": "Password", + "description": "Basic auth password to access a secure Elasticsearch server", + "type": "string", + "airbyte_secret": true + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-firebolt.json b/jvm/src/main/resources/airbyte/destination-firebolt.json new file mode 100644 index 0000000..53f6d83 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-firebolt.json @@ -0,0 +1,109 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/firebolt", + 
"supported_destination_sync_modes": ["overwrite", "append"], + "supportsIncremental": true, + "supportsDBT": true, + "supportsNormalization": false, + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Firebolt Spec", + "type": "object", + "required": ["username", "password", "database"], + "additionalProperties": false, + "properties": { + "username": { + "type": "string", + "title": "Username", + "description": "Firebolt email address you use to login.", + "examples": ["username@email.com"], + "order": 0 + }, + "password": { + "type": "string", + "title": "Password", + "description": "Firebolt password.", + "airbyte_secret": true, + "order": 1 + }, + "account": { + "type": "string", + "title": "Account", + "description": "Firebolt account to login." + }, + "host": { + "type": "string", + "title": "Host", + "description": "The host name of your Firebolt database.", + "examples": ["api.app.firebolt.io"] + }, + "database": { + "type": "string", + "title": "Database", + "description": "The database to connect to." + }, + "engine": { + "type": "string", + "title": "Engine", + "description": "Engine name or url to connect to." + }, + "loading_method": { + "type": "object", + "title": "Loading Method", + "description": "Loading method used to select the way data will be uploaded to Firebolt", + "oneOf": [ + { + "title": "SQL Inserts", + "additionalProperties": false, + "required": ["method"], + "properties": { + "method": { + "type": "string", + "const": "SQL" + } + } + }, + { + "title": "External Table via S3", + "additionalProperties": false, + "required": [ + "method", + "s3_bucket", + "s3_region", + "aws_key_id", + "aws_key_secret" + ], + "properties": { + "method": { + "type": "string", + "const": "S3" + }, + "s3_bucket": { + "type": "string", + "title": "S3 bucket name", + "description": "The name of the S3 bucket." + }, + "s3_region": { + "type": "string", + "title": "S3 region name", + "description": "Region name of the S3 bucket.", + "examples": ["us-east-1"] + }, + "aws_key_id": { + "type": "string", + "title": "AWS Key ID", + "airbyte_secret": true, + "description": "AWS access key granting read and write access to S3." + }, + "aws_key_secret": { + "type": "string", + "title": "AWS Key Secret", + "airbyte_secret": true, + "description": "Corresponding secret part of the AWS Key" + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-firestore.json b/jvm/src/main/resources/airbyte/destination-firestore.json new file mode 100644 index 0000000..a0b0fbe --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-firestore.json @@ -0,0 +1,27 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/firestore", + "supported_destination_sync_modes": ["append", "overwrite"], + "supportsIncremental": true, + "supportsDBT": false, + "supportsNormalization": false, + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Destination Google Firestore", + "type": "object", + "required": ["project_id"], + "additionalProperties": false, + "properties": { + "project_id": { + "type": "string", + "description": "The GCP project ID for the project containing the target BigQuery dataset.", + "title": "Project ID" + }, + "credentials_json": { + "type": "string", + "description": "The contents of the JSON service account key. Check out the docs if you need help generating this key. 
Default credentials will be used if this field is left empty.", + "title": "Credentials JSON", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-gcs.json b/jvm/src/main/resources/airbyte/destination-gcs.json new file mode 100644 index 0000000..13e68ca --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-gcs.json @@ -0,0 +1,382 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/gcs", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "GCS Destination Spec", + "type": "object", + "required": ["gcs_bucket_name", "gcs_bucket_path", "credential", "format"], + "properties": { + "gcs_bucket_name": { + "title": "GCS Bucket Name", + "order": 1, + "type": "string", + "description": "You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here.", + "examples": ["airbyte_sync"] + }, + "gcs_bucket_path": { + "title": "GCS Bucket Path", + "description": "GCS Bucket Path string Subdirectory under the above bucket to sync the data into.", + "order": 2, + "type": "string", + "examples": ["data_sync/test"] + }, + "gcs_bucket_region": { + "title": "GCS Bucket Region (Optional)", + "type": "string", + "order": 3, + "default": "us", + "description": "Select a Region of the GCS Bucket. Read more here.", + "enum": [ + "northamerica-northeast1", + "northamerica-northeast2", + "us-central1", + "us-east1", + "us-east4", + "us-west1", + "us-west2", + "us-west3", + "us-west4", + "southamerica-east1", + "southamerica-west1", + "europe-central2", + "europe-north1", + "europe-west1", + "europe-west2", + "europe-west3", + "europe-west4", + "europe-west6", + "asia-east1", + "asia-east2", + "asia-northeast1", + "asia-northeast2", + "asia-northeast3", + "asia-south1", + "asia-south2", + "asia-southeast1", + "asia-southeast2", + "australia-southeast1", + "australia-southeast2", + "asia", + "eu", + "us", + "asia1", + "eur4", + "nam4" + ] + }, + "credential": { + "title": "Authentication", + "description": "An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.", + "type": "object", + "order": 0, + "oneOf": [ + { + "title": "HMAC Key", + "required": [ + "credential_type", + "hmac_key_access_id", + "hmac_key_secret" + ], + "properties": { + "credential_type": { + "type": "string", + "enum": ["HMAC_KEY"], + "default": "HMAC_KEY" + }, + "hmac_key_access_id": { + "type": "string", + "description": "When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here.", + "title": "Access ID", + "airbyte_secret": true, + "order": 0, + "examples": ["1234567890abcdefghij1234"] + }, + "hmac_key_secret": { + "type": "string", + "description": "The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here.", + "title": "Secret", + "airbyte_secret": true, + "order": 1, + "examples": ["1234567890abcdefghij1234567890ABCDEFGHIJ"] + } + } + } + ] + }, + "format": { + "title": "Output Format", + "type": "object", + "description": "Output data format. 
One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format.", + "order": 4, + "oneOf": [ + { + "title": "Avro: Apache Avro", + "required": ["format_type", "compression_codec"], + "properties": { + "format_type": { + "type": "string", + "enum": ["Avro"], + "default": "Avro" + }, + "compression_codec": { + "title": "Compression Codec", + "description": "The compression algorithm used to compress data. Default to no compression.", + "type": "object", + "oneOf": [ + { + "title": "No Compression", + "required": ["codec"], + "properties": { + "codec": { + "type": "string", + "enum": ["no compression"], + "default": "no compression" + } + } + }, + { + "title": "Deflate", + "required": ["codec"], + "properties": { + "codec": { + "type": "string", + "enum": ["Deflate"], + "default": "Deflate" + }, + "compression_level": { + "title": "Deflate level (Optional)", + "description": "0: no compression & fastest, 9: best compression & slowest.", + "type": "integer", + "default": 0, + "minimum": 0, + "maximum": 9 + } + } + }, + { + "title": "bzip2", + "required": ["codec"], + "properties": { + "codec": { + "type": "string", + "enum": ["bzip2"], + "default": "bzip2" + } + } + }, + { + "title": "xz", + "required": ["codec"], + "properties": { + "codec": { + "type": "string", + "enum": ["xz"], + "default": "xz" + }, + "compression_level": { + "title": "Compression Level (Optional)", + "description": "The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details.", + "type": "integer", + "default": 6, + "minimum": 0, + "maximum": 9 + } + } + }, + { + "title": "zstandard", + "required": ["codec"], + "properties": { + "codec": { + "type": "string", + "enum": ["zstandard"], + "default": "zstandard" + }, + "compression_level": { + "title": "Compression Level (Optional)", + "description": "Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.", + "type": "integer", + "default": 3, + "minimum": -5, + "maximum": 22 + }, + "include_checksum": { + "title": "Include Checksum (Optional)", + "description": "If true, include a checksum with each data block.", + "type": "boolean", + "default": false + } + } + }, + { + "title": "snappy", + "required": ["codec"], + "properties": { + "codec": { + "type": "string", + "enum": ["snappy"], + "default": "snappy" + } + } + } + ] + } + } + }, + { + "title": "CSV: Comma-Separated Values", + "required": ["format_type"], + "properties": { + "format_type": { + "type": "string", + "enum": ["CSV"], + "default": "CSV" + }, + "flattening": { + "type": "string", + "title": "Normalization (Optional)", + "description": "Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.", + "default": "No flattening", + "enum": ["No flattening", "Root level flattening"] + }, + "compression": { + "title": "Compression", + "type": "object", + "description": "Whether the output files should be compressed. 
If compression is selected, the output filename will have an extra extension (GZIP: \".csv.gz\").", + "oneOf": [ + { + "title": "No Compression", + "requires": ["compression_type"], + "properties": { + "compression_type": { + "type": "string", + "enum": ["No Compression"], + "default": "No Compression" + } + } + }, + { + "title": "GZIP", + "requires": ["compression_type"], + "properties": { + "compression_type": { + "type": "string", + "enum": ["GZIP"], + "default": "GZIP" + } + } + } + ] + } + } + }, + { + "title": "JSON Lines: newline-delimited JSON", + "required": ["format_type"], + "properties": { + "format_type": { + "type": "string", + "enum": ["JSONL"], + "default": "JSONL" + }, + "compression": { + "title": "Compression", + "type": "object", + "description": "Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: \".jsonl.gz\").", + "oneOf": [ + { + "title": "No Compression", + "requires": "compression_type", + "properties": { + "compression_type": { + "type": "string", + "enum": ["No Compression"], + "default": "No Compression" + } + } + }, + { + "title": "GZIP", + "requires": "compression_type", + "properties": { + "compression_type": { + "type": "string", + "enum": ["GZIP"], + "default": "GZIP" + } + } + } + ] + } + } + }, + { + "title": "Parquet: Columnar Storage", + "required": ["format_type"], + "properties": { + "format_type": { + "type": "string", + "enum": ["Parquet"], + "default": "Parquet" + }, + "compression_codec": { + "title": "Compression Codec (Optional)", + "description": "The compression algorithm used to compress data pages.", + "type": "string", + "default": "UNCOMPRESSED", + "enum": [ + "UNCOMPRESSED", + "SNAPPY", + "GZIP", + "LZO", + "BROTLI", + "LZ4", + "ZSTD" + ] + }, + "block_size_mb": { + "title": "Block Size (Row Group Size) (MB) (Optional)", + "description": "This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.", + "type": "integer", + "default": 128, + "examples": [128] + }, + "max_padding_size_mb": { + "title": "Max Padding Size (MB) (Optional)", + "description": "Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.", + "type": "integer", + "default": 8, + "examples": [8] + }, + "page_size_kb": { + "title": "Page Size (KB) (Optional)", + "description": "The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.", + "type": "integer", + "default": 1024, + "examples": [1024] + }, + "dictionary_page_size_kb": { + "title": "Dictionary Page Size (KB) (Optional)", + "description": "There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. 
Default: 1024 KB.", + "type": "integer", + "default": 1024, + "examples": [1024] + }, + "dictionary_encoding": { + "title": "Dictionary Encoding (Optional)", + "description": "Default: true.", + "type": "boolean", + "default": true + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-google-sheets.json b/jvm/src/main/resources/airbyte/destination-google-sheets.json new file mode 100644 index 0000000..b550760 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-google-sheets.json @@ -0,0 +1,58 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/google-sheets", + "supported_destination_sync_modes": ["overwrite", "append", "append_dedup"], + "supportsIncremental": true, + "supportsDBT": false, + "supportsNormalization": false, + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Destination Google Sheets", + "type": "object", + "required": ["spreadsheet_id", "credentials"], + "additionalProperties": false, + "properties": { + "spreadsheet_id": { + "type": "string", + "title": "Spreadsheet Link", + "description": "The link to your spreadsheet. See this guide for more details.", + "examples": [ + "https://docs.google.com/spreadsheets/d/1hLd9Qqti3UyLXZB2aFfUWDT7BG/edit" + ] + }, + "credentials": { + "type": "object", + "title": "* Authentication via Google (OAuth)", + "description": "Google API Credentials for connecting to Google Sheets and Google Drive APIs", + "required": ["client_id", "client_secret", "refresh_token"], + "properties": { + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Client ID of your Google Sheets developer application.", + "airbyte_secret": true + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The Client Secret of your Google Sheets developer application.", + "airbyte_secret": true + }, + "refresh_token": { + "title": "Refresh Token", + "type": "string", + "description": "The token for obtaining new access token.", + "airbyte_secret": true + } + } + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": ["credentials"], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["refresh_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-jdbc.json b/jvm/src/main/resources/airbyte/destination-jdbc.json new file mode 100644 index 0000000..d3b352b --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-jdbc.json @@ -0,0 +1,39 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/postgres", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "JDBC Destination Spec", + "type": "object", + "required": ["username", "jdbc_url"], + "additionalProperties": true, + "properties": { + "username": { + "description": "The username which is used to access the database.", + "title": "Username", + "type": "string" + }, + "password": { + "description": "The password associated with this username.", + "title": "Password", + "type": "string", + "airbyte_secret": true + }, + "jdbc_url": { + "description": "JDBC formatted url. 
See the standard here.", + "title": "JDBC URL", + "type": "string" + }, + "schema": { + "description": "If you leave the schema unspecified, JDBC defaults to a schema named \"public\".", + "type": "string", + "examples": ["public"], + "default": "public", + "title": "Default Schema" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-kafka.json b/jvm/src/main/resources/airbyte/destination-kafka.json new file mode 100644 index 0000000..d64b596 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-kafka.json @@ -0,0 +1,259 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/kafka", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Kafka Destination Spec", + "type": "object", + "required": [ + "bootstrap_servers", + "topic_pattern", + "protocol", + "acks", + "enable_idempotence", + "compression_type", + "batch_size", + "linger_ms", + "max_in_flight_requests_per_connection", + "client_dns_lookup", + "buffer_memory", + "max_request_size", + "retries", + "socket_connection_setup_timeout_ms", + "socket_connection_setup_timeout_max_ms", + "max_block_ms", + "request_timeout_ms", + "delivery_timeout_ms", + "send_buffer_bytes", + "receive_buffer_bytes" + ], + "additionalProperties": true, + "properties": { + "bootstrap_servers": { + "title": "Bootstrap Servers", + "description": "A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).", + "type": "string", + "examples": ["kafka-broker1:9092,kafka-broker2:9092"] + }, + "topic_pattern": { + "title": "Topic Pattern", + "description": "Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. 
Notice that the topic name will be transformed to a standard naming convention.", + "type": "string", + "examples": ["sample.topic", "{namespace}.{stream}.sample"] + }, + "test_topic": { + "title": "Test Topic", + "description": "Topic to test if Airbyte can produce messages.", + "type": "string", + "examples": ["test.topic"] + }, + "sync_producer": { + "title": "Sync Producer", + "description": "Wait synchronously until the record has been sent to Kafka.", + "type": "boolean", + "default": false + }, + "protocol": { + "title": "Protocol", + "type": "object", + "description": "Protocol used to communicate with brokers.", + "oneOf": [ + { + "title": "PLAINTEXT", + "required": ["security_protocol"], + "properties": { + "security_protocol": { + "type": "string", + "enum": ["PLAINTEXT"], + "default": "PLAINTEXT" + } + } + }, + { + "title": "SASL PLAINTEXT", + "required": [ + "security_protocol", + "sasl_mechanism", + "sasl_jaas_config" + ], + "properties": { + "security_protocol": { + "type": "string", + "enum": ["SASL_PLAINTEXT"], + "default": "SASL_PLAINTEXT" + }, + "sasl_mechanism": { + "title": "SASL Mechanism", + "description": "SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.", + "type": "string", + "default": "PLAIN", + "enum": ["PLAIN"] + }, + "sasl_jaas_config": { + "title": "SASL JAAS Config", + "description": "JAAS login context parameters for SASL connections in the format used by JAAS configuration files.", + "type": "string", + "default": "", + "airbyte_secret": true + } + } + }, + { + "title": "SASL SSL", + "required": [ + "security_protocol", + "sasl_mechanism", + "sasl_jaas_config" + ], + "properties": { + "security_protocol": { + "type": "string", + "enum": ["SASL_SSL"], + "default": "SASL_SSL" + }, + "sasl_mechanism": { + "title": "SASL Mechanism", + "description": "SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.", + "type": "string", + "default": "GSSAPI", + "enum": [ + "GSSAPI", + "OAUTHBEARER", + "SCRAM-SHA-256", + "SCRAM-SHA-512", + "PLAIN" + ] + }, + "sasl_jaas_config": { + "title": "SASL JAAS Config", + "description": "JAAS login context parameters for SASL connections in the format used by JAAS configuration files.", + "type": "string", + "default": "", + "airbyte_secret": true + } + } + } + ] + }, + "client_id": { + "title": "Client ID", + "description": "An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.", + "type": "string", + "examples": ["airbyte-producer"] + }, + "acks": { + "title": "ACKs", + "description": "The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent.", + "type": "string", + "default": "1", + "enum": ["0", "1", "all"] + }, + "enable_idempotence": { + "title": "Enable Idempotence", + "description": "When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. 
If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.", + "type": "boolean", + "default": false + }, + "compression_type": { + "title": "Compression Type", + "description": "The compression type for all data generated by the producer.", + "type": "string", + "default": "none", + "enum": ["none", "gzip", "snappy", "lz4", "zstd"] + }, + "batch_size": { + "title": "Batch Size", + "description": "The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition.", + "type": "integer", + "examples": [16384] + }, + "linger_ms": { + "title": "Linger ms", + "description": "The producer groups together any records that arrive in between request transmissions into a single batched request.", + "type": "string", + "examples": [0] + }, + "max_in_flight_requests_per_connection": { + "title": "Max in Flight Requests per Connection", + "description": "The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.", + "type": "integer", + "examples": [5] + }, + "client_dns_lookup": { + "title": "Client DNS Lookup", + "description": "Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.", + "type": "string", + "default": "use_all_dns_ips", + "enum": [ + "default", + "use_all_dns_ips", + "resolve_canonical_bootstrap_servers_only", + "use_all_dns_ips" + ] + }, + "buffer_memory": { + "title": "Buffer Memory", + "description": "The total bytes of memory the producer can use to buffer records waiting to be sent to the server.", + "type": "string", + "examples": 33554432 + }, + "max_request_size": { + "title": "Max Request Size", + "description": "The maximum size of a request in bytes.", + "type": "integer", + "examples": [1048576] + }, + "retries": { + "title": "Retries", + "description": "Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.", + "type": "integer", + "examples": [2147483647] + }, + "socket_connection_setup_timeout_ms": { + "title": "Socket Connection Setup Timeout", + "description": "The amount of time the client will wait for the socket connection to be established.", + "type": "string", + "examples": [10000] + }, + "socket_connection_setup_timeout_max_ms": { + "title": "Socket Connection Setup Max Timeout", + "description": "The maximum amount of time the client will wait for the socket connection to be established. 
The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.", + "type": "string", + "examples": [30000] + }, + "max_block_ms": { + "title": "Max Block ms", + "description": "The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.", + "type": "string", + "examples": [60000] + }, + "request_timeout_ms": { + "title": "Request Timeout", + "description": "The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.", + "type": "integer", + "examples": [30000] + }, + "delivery_timeout_ms": { + "title": "Delivery Timeout", + "description": "An upper bound on the time to report success or failure after a call to 'send()' returns.", + "type": "integer", + "examples": [120000] + }, + "send_buffer_bytes": { + "title": "Send Buffer bytes", + "description": "The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.", + "type": "integer", + "examples": [131072] + }, + "receive_buffer_bytes": { + "title": "Receive Buffer bytes", + "description": "The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.", + "type": "integer", + "examples": [32768] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-keen.json b/jvm/src/main/resources/airbyte/destination-keen.json new file mode 100644 index 0000000..56abae7 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-keen.json @@ -0,0 +1,35 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/keen", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Keen Spec", + "type": "object", + "required": ["project_id", "api_key"], + "additionalProperties": false, + "properties": { + "project_id": { + "description": "To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section.", + "title": "Project ID", + "type": "string", + "examples": ["58b4acc22ba938934e888322e"] + }, + "api_key": { + "title": "API Key", + "description": "To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section.", + "type": "string", + "examples": ["ABCDEFGHIJKLMNOPRSTUWXYZ"], + "airbyte_secret": true + }, + "infer_timestamp": { + "title": "Infer Timestamp", + "description": "Allow connector to guess keen.timestamp value based on the streamed data.", + "type": "boolean", + "default": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-kinesis.json b/jvm/src/main/resources/airbyte/destination-kinesis.json new file mode 100644 index 0000000..65aea2d --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-kinesis.json @@ -0,0 +1,58 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/kinesis", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["append"], + "connectionSpecification": { + 
"$schema": "http://json-schema.org/draft-07/schema#", + "title": "Kinesis Destination Spec", + "type": "object", + "required": ["shardCount", "accessKey", "privateKey", "bufferSize"], + "additionalProperties": true, + "properties": { + "endpoint": { + "title": "Endpoint", + "description": "AWS Kinesis endpoint.", + "type": "string", + "order": 0 + }, + "region": { + "title": "Region", + "description": "AWS region. Your account determines the Regions that are available to you.", + "type": "string", + "order": 1 + }, + "shardCount": { + "title": "Shard Count", + "description": "Number of shards to which the data should be streamed.", + "type": "integer", + "default": 5, + "order": 2 + }, + "accessKey": { + "title": "Access Key", + "description": "Generate the AWS Access Key for current user.", + "airbyte_secret": true, + "type": "string", + "order": 3 + }, + "privateKey": { + "title": "Private Key", + "description": "The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a \"recovery phrase\".", + "airbyte_secret": true, + "type": "string", + "order": 4 + }, + "bufferSize": { + "title": "Buffer Size", + "description": "Buffer size for storing kinesis records before being batch streamed.", + "type": "integer", + "minimum": 1, + "maximum": 500, + "default": 100, + "order": 5 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-kvdb.json b/jvm/src/main/resources/airbyte/destination-kvdb.json new file mode 100644 index 0000000..e394c1e --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-kvdb.json @@ -0,0 +1,28 @@ +{ + "documentationUrl": "https://kvdb.io/docs/api/", + "supported_destination_sync_modes": ["overwrite", "append"], + "supportsIncremental": true, + "supportsDBT": false, + "supportsNormalization": false, + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Destination KVdb", + "type": "object", + "required": ["bucket_id", "secret_key"], + "additionalProperties": false, + "properties": { + "bucket_id": { + "title": "Bucket ID", + "type": "string", + "description": "The ID of your KVdb bucket.", + "order": 1 + }, + "secret_key": { + "title": "Secret Key", + "type": "string", + "description": "Your bucket Secret Key.", + "order": 2 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-local-json.json b/jvm/src/main/resources/airbyte/destination-local-json.json new file mode 100644 index 0000000..d6fc2f3 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-local-json.json @@ -0,0 +1,22 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/local-json", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Local Json Destination Spec", + "type": "object", + "required": ["destination_path"], + "additionalProperties": false, + "properties": { + "destination_path": { + "description": "Path to the directory where json files will be written. The files will be placed inside that local mount. 
For more information check out our docs", + "title": "Destination Path", + "type": "string", + "examples": ["/json_data"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-mariadb-columnstore.json b/jvm/src/main/resources/airbyte/destination-mariadb-columnstore.json new file mode 100644 index 0000000..0c84779 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-mariadb-columnstore.json @@ -0,0 +1,51 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/mariadb-columnstore", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "MariaDB Columnstore Destination Spec", + "type": "object", + "required": ["host", "port", "username", "database"], + "additionalProperties": true, + "properties": { + "host": { + "title": "Host", + "description": "The Hostname of the database.", + "type": "string", + "order": 0 + }, + "port": { + "title": "Port", + "description": "The Port of the database.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 3306, + "examples": ["3306"], + "order": 1 + }, + "database": { + "title": "Database", + "description": "Name of the database.", + "type": "string", + "order": 2 + }, + "username": { + "title": "Username", + "description": "The Username which is used to access the database.", + "type": "string", + "order": 3 + }, + "password": { + "title": "Password", + "description": "The Password associated with the username.", + "type": "string", + "airbyte_secret": true, + "order": 4 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-meilisearch.json b/jvm/src/main/resources/airbyte/destination-meilisearch.json new file mode 100644 index 0000000..e3d0095 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-meilisearch.json @@ -0,0 +1,29 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/meilisearch", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "MeiliSearch Destination Spec", + "type": "object", + "required": ["host"], + "additionalProperties": true, + "properties": { + "host": { + "title": "Host", + "description": "Hostname of the MeiliSearch instance.", + "type": "string", + "order": 0 + }, + "api_key": { + "title": "API Key", + "airbyte_secret": true, + "description": "MeiliSearch API Key. 
See the docs for more information on how to obtain this key.", + "type": "string", + "order": 1 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-mongodb.json b/jvm/src/main/resources/airbyte/destination-mongodb.json new file mode 100644 index 0000000..f9b8c16 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-mongodb.json @@ -0,0 +1,148 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/mongodb", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "MongoDB Destination Spec", + "type": "object", + "required": ["database", "auth_type"], + "properties": { + "instance_type": { + "description": "MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default.", + "title": "MongoDb Instance Type", + "type": "object", + "order": 0, + "oneOf": [ + { + "title": "Standalone MongoDb Instance", + "required": ["instance", "host", "port"], + "properties": { + "instance": { + "type": "string", + "enum": ["standalone"], + "default": "standalone" + }, + "host": { + "title": "Host", + "type": "string", + "description": "The Host of a Mongo database to be replicated.", + "order": 0 + }, + "port": { + "title": "Port", + "type": "integer", + "description": "The Port of a Mongo database to be replicated.", + "minimum": 0, + "maximum": 65536, + "default": 27017, + "examples": ["27017"], + "order": 1 + }, + "tls": { + "title": "TLS Connection", + "type": "boolean", + "description": "Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.", + "default": false, + "order": 2 + } + } + }, + { + "title": "Replica Set", + "required": ["instance", "server_addresses"], + "properties": { + "instance": { + "type": "string", + "enum": ["replica"], + "default": "replica" + }, + "server_addresses": { + "title": "Server addresses", + "type": "string", + "description": "The members of a replica set. 
Please specify `host`:`port` of each member separated by comma.", + "examples": ["host1:27017,host2:27017,host3:27017"], + "order": 0 + }, + "replica_set": { + "title": "Replica Set", + "type": "string", + "description": "A replica set name.", + "order": 1 + } + } + }, + { + "title": "MongoDB Atlas", + "required": ["instance", "cluster_url"], + "properties": { + "instance": { + "type": "string", + "enum": ["atlas"], + "default": "atlas" + }, + "cluster_url": { + "title": "Cluster URL", + "type": "string", + "description": "URL of a cluster to connect to.", + "order": 0 + } + } + } + ] + }, + "database": { + "title": "DB Name", + "description": "Name of the database.", + "type": "string", + "order": 2 + }, + "auth_type": { + "title": "Authorization type", + "type": "object", + "description": "Authorization type.", + "oneOf": [ + { + "title": "None", + "description": "None.", + "required": ["authorization"], + "type": "object", + "properties": { + "authorization": { + "type": "string", + "const": "none" + } + } + }, + { + "title": "Login/Password", + "description": "Login/Password.", + "required": ["authorization", "username", "password"], + "type": "object", + "properties": { + "authorization": { + "type": "string", + "const": "login/password" + }, + "username": { + "title": "User", + "description": "Username to use to access the database.", + "type": "string", + "order": 1 + }, + "password": { + "title": "Password", + "description": "Password associated with the username.", + "type": "string", + "airbyte_secret": true, + "order": 2 + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-mqtt.json b/jvm/src/main/resources/airbyte/destination-mqtt.json new file mode 100644 index 0000000..759bc66 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-mqtt.json @@ -0,0 +1,108 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/mqtt", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "MQTT Destination Spec", + "type": "object", + "required": [ + "broker_host", + "broker_port", + "use_tls", + "topic_pattern", + "publisher_sync", + "connect_timeout", + "automatic_reconnect", + "clean_session", + "message_retained", + "message_qos" + ], + "additionalProperties": true, + "properties": { + "broker_host": { + "title": "MQTT broker host", + "description": "Host of the broker to connect to.", + "type": "string" + }, + "broker_port": { + "title": "MQTT broker port", + "description": "Port of the broker.", + "type": "integer" + }, + "use_tls": { + "title": "Use TLS", + "description": "Whether to use TLS encryption on the connection.", + "type": "boolean", + "default": false + }, + "username": { + "title": "Username", + "description": "User name to use for the connection.", + "type": "string" + }, + "password": { + "title": "Password", + "description": "Password to use for the connection.", + "type": "string", + "airbyte_secret": true + }, + "topic_pattern": { + "title": "Topic pattern", + "description": "Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. 
Notice that the topic name will be transformed to a standard naming convention.", + "type": "string", + "examples": ["sample.topic", "{namespace}/{stream}/sample"] + }, + "topic_test": { + "title": "Test topic", + "description": "Topic to test if Airbyte can produce messages.", + "type": "string", + "examples": ["test/topic"] + }, + "client": { + "title": "Client ID", + "description": "A client identifier that is unique on the server being connected to.", + "type": "string", + "examples": ["airbyte-client1"] + }, + "publisher_sync": { + "title": "Sync publisher", + "description": "Wait synchronously until the record has been sent to the broker.", + "type": "boolean", + "default": false + }, + "connect_timeout": { + "title": "Connect timeout", + "description": " Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.", + "type": "integer", + "default": 30 + }, + "automatic_reconnect": { + "title": "Automatic reconnect", + "description": "Whether the client will automatically attempt to reconnect to the server if the connection is lost.", + "type": "boolean", + "default": true + }, + "clean_session": { + "title": "Clean session", + "description": "Whether the client and server should remember state across restarts and reconnects.", + "type": "boolean", + "default": true + }, + "message_retained": { + "title": "Message retained", + "description": "Whether or not the publish message should be retained by the messaging engine.", + "type": "boolean", + "default": false + }, + "message_qos": { + "title": "Message QoS", + "description": "Quality of service used for each message to be delivered.", + "default": "AT_LEAST_ONCE", + "enum": ["AT_MOST_ONCE", "AT_LEAST_ONCE", "EXACTLY_ONCE"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-mssql.json b/jvm/src/main/resources/airbyte/destination-mssql.json new file mode 100644 index 0000000..3aff969 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-mssql.json @@ -0,0 +1,120 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/mssql", + "supportsIncremental": true, + "supportsNormalization": true, + "supportsDBT": true, + "supported_destination_sync_modes": ["overwrite", "append", "append_dedup"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "MS SQL Server Destination Spec", + "type": "object", + "required": ["host", "port", "username", "database", "schema"], + "properties": { + "host": { + "title": "Host", + "description": "The host name of the MSSQL database.", + "type": "string", + "order": 0 + }, + "port": { + "title": "Port", + "description": "The port of the MSSQL database.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 1433, + "examples": ["1433"], + "order": 1 + }, + "database": { + "title": "DB Name", + "description": "The name of the MSSQL database.", + "type": "string", + "order": 2 + }, + "schema": { + "title": "Default Schema", + "description": "The default schema tables are written to if the source does not specify a namespace. 
The usual value for this field is \"public\".", + "type": "string", + "examples": ["public"], + "default": "public", + "order": 3 + }, + "username": { + "title": "User", + "description": "The username which is used to access the database.", + "type": "string", + "order": 4 + }, + "password": { + "title": "Password", + "description": "The password associated with this username.", + "type": "string", + "airbyte_secret": true, + "order": 5 + }, + "jdbc_url_params": { + "title": "JDBC URL Params", + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).", + "type": "string", + "order": 6 + }, + "ssl_method": { + "title": "SSL Method", + "type": "object", + "description": "The encryption method which is used to communicate with the database.", + "order": 7, + "oneOf": [ + { + "title": "Unencrypted", + "description": "The data transfer will not be encrypted.", + "required": ["ssl_method"], + "type": "object", + "properties": { + "ssl_method": { + "type": "string", + "const": "unencrypted", + "enum": ["unencrypted"], + "default": "unencrypted" + } + } + }, + { + "title": "Encrypted (trust server certificate)", + "description": "Use the certificate provided by the server without verification. (For testing purposes only!)", + "required": ["ssl_method"], + "type": "object", + "properties": { + "ssl_method": { + "type": "string", + "const": "encrypted_trust_server_certificate", + "enum": ["encrypted_trust_server_certificate"], + "default": "encrypted_trust_server_certificate" + } + } + }, + { + "title": "Encrypted (verify certificate)", + "description": "Verify and use the certificate provided by the server.", + "required": ["ssl_method", "trustStoreName", "trustStorePassword"], + "type": "object", + "properties": { + "ssl_method": { + "type": "string", + "const": "encrypted_verify_certificate", + "enum": ["encrypted_verify_certificate"], + "default": "encrypted_verify_certificate" + }, + "hostNameInCertificate": { + "title": "Host Name In Certificate", + "type": "string", + "description": "Specifies the host name of the server. 
The value of this property must match the subject property of the certificate.", + "order": 8 + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-mysql.json b/jvm/src/main/resources/airbyte/destination-mysql.json new file mode 100644 index 0000000..0605667 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-mysql.json @@ -0,0 +1,64 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/mysql", + "supportsIncremental": true, + "supportsNormalization": true, + "supportsDBT": true, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "MySQL Destination Spec", + "type": "object", + "required": ["host", "port", "username", "database"], + "additionalProperties": true, + "properties": { + "host": { + "title": "Host", + "description": "Hostname of the database.", + "type": "string", + "order": 0 + }, + "port": { + "title": "Port", + "description": "Port of the database.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 3306, + "examples": ["3306"], + "order": 1 + }, + "database": { + "title": "DB Name", + "description": "Name of the database.", + "type": "string", + "order": 2 + }, + "username": { + "title": "User", + "description": "Username to use to access the database.", + "type": "string", + "order": 3 + }, + "password": { + "title": "Password", + "description": "Password associated with the username.", + "type": "string", + "airbyte_secret": true, + "order": 4 + }, + "ssl": { + "title": "SSL Connection", + "description": "Encrypt data using SSL.", + "type": "boolean", + "default": true, + "order": 5 + }, + "jdbc_url_params": { + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).", + "title": "JDBC URL Params", + "type": "string", + "order": 6 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-oracle.json b/jvm/src/main/resources/airbyte/destination-oracle.json new file mode 100644 index 0000000..d7fb730 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-oracle.json @@ -0,0 +1,126 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/oracle", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Oracle Destination Spec", + "type": "object", + "required": ["host", "port", "username", "sid"], + "additionalProperties": true, + "properties": { + "host": { + "title": "Host", + "description": "The hostname of the database.", + "type": "string", + "order": 0 + }, + "port": { + "title": "Port", + "description": "The port of the database.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 1521, + "examples": ["1521"], + "order": 1 + }, + "sid": { + "title": "SID", + "description": "The System Identifier uniquely distinguishes the instance from any other instance on the same computer.", + "type": "string", + "order": 2 + }, + "username": { + "title": "User", + "description": "The username to access the database. 
This user must have CREATE USER privileges in the database.", + "type": "string", + "order": 3 + }, + "password": { + "title": "Password", + "description": "The password associated with the username.", + "type": "string", + "airbyte_secret": true, + "order": 4 + }, + "jdbc_url_params": { + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).", + "title": "JDBC URL Params", + "type": "string", + "order": 5 + }, + "schema": { + "title": "Default Schema", + "description": "The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is \"airbyte\". In Oracle, schemas and users are the same thing, so the \"user\" parameter is used as the login credentials and this is used for the default Airbyte message schema.", + "type": "string", + "examples": ["airbyte"], + "default": "airbyte", + "order": 6 + }, + "encryption": { + "title": "Encryption", + "type": "object", + "description": "The encryption method which is used when communicating with the database.", + "order": 7, + "oneOf": [ + { + "title": "Unencrypted", + "description": "Data transfer will not be encrypted.", + "required": ["encryption_method"], + "properties": { + "encryption_method": { + "type": "string", + "const": "unencrypted", + "enum": ["unencrypted"], + "default": "unencrypted" + } + } + }, + { + "title": "Native Network Encryption (NNE)", + "description": "The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.", + "required": ["encryption_method"], + "properties": { + "encryption_method": { + "type": "string", + "const": "client_nne", + "enum": ["client_nne"], + "default": "client_nne" + }, + "encryption_algorithm": { + "type": "string", + "description": "This parameter defines the database encryption algorithm.", + "title": "Encryption Algorithm", + "default": "AES256", + "enum": ["AES256", "RC4_56", "3DES168"] + } + } + }, + { + "title": "TLS Encrypted (verify certificate)", + "description": "Verify and use the certificate provided by the server.", + "required": ["encryption_method", "ssl_certificate"], + "properties": { + "encryption_method": { + "type": "string", + "const": "encrypted_verify_certificate", + "enum": ["encrypted_verify_certificate"], + "default": "encrypted_verify_certificate" + }, + "ssl_certificate": { + "title": "SSL PEM file", + "description": "Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.", + "type": "string", + "airbyte_secret": true, + "multiline": true + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-postgres.json b/jvm/src/main/resources/airbyte/destination-postgres.json new file mode 100644 index 0000000..9117125 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-postgres.json @@ -0,0 +1,221 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/postgres", + "supportsIncremental": true, + "supportsNormalization": true, + "supportsDBT": true, + "supported_destination_sync_modes": ["overwrite", "append", "append_dedup"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Postgres Destination Spec", + 
"type": "object", + "required": ["host", "port", "username", "database", "schema"], + "additionalProperties": true, + "properties": { + "host": { + "title": "Host", + "description": "Hostname of the database.", + "type": "string", + "order": 0 + }, + "port": { + "title": "Port", + "description": "Port of the database.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 5432, + "examples": ["5432"], + "order": 1 + }, + "database": { + "title": "DB Name", + "description": "Name of the database.", + "type": "string", + "order": 2 + }, + "schema": { + "title": "Default Schema", + "description": "The default schema tables are written to if the source does not specify a namespace. The usual value for this field is \"public\".", + "type": "string", + "examples": ["public"], + "default": "public", + "order": 3 + }, + "username": { + "title": "User", + "description": "Username to use to access the database.", + "type": "string", + "order": 4 + }, + "password": { + "title": "Password", + "description": "Password associated with the username.", + "type": "string", + "airbyte_secret": true, + "order": 5 + }, + "ssl": { + "title": "SSL Connection", + "description": "Encrypt data using SSL. When activating SSL, please select one of the connection modes.", + "type": "boolean", + "default": false, + "order": 6 + }, + "ssl_mode": { + "title": "SSL modes", + "description": "SSL connection modes. \n disable - Chose this mode to disable encryption of communication between Airbyte and destination database\n allow - Chose this mode to enable encryption only when required by the source database\n prefer - Chose this mode to allow unencrypted connection only if the source database does not support encryption\n require - Chose this mode to always require encryption. If the source database server does not support encryption, connection will fail\n verify-ca - Chose this mode to always require encryption and to verify that the source database server has a valid SSL certificate\n verify-full - This is the most secure mode. 
Choose this mode to always require encryption and to verify the identity of the source database server\n See more information in the docs.", + "type": "object", + "order": 7, + "oneOf": [ + { + "title": "disable", + "additionalProperties": false, + "description": "Disable SSL.", + "required": ["mode"], + "properties": { + "mode": { + "type": "string", + "const": "disable", + "enum": ["disable"], + "default": "disable", + "order": 0 + } + } + }, + { + "title": "allow", + "additionalProperties": false, + "description": "Allow SSL mode.", + "required": ["mode"], + "properties": { + "mode": { + "type": "string", + "const": "allow", + "enum": ["allow"], + "default": "allow", + "order": 0 + } + } + }, + { + "title": "prefer", + "additionalProperties": false, + "description": "Prefer SSL mode.", + "required": ["mode"], + "properties": { + "mode": { + "type": "string", + "const": "prefer", + "enum": ["prefer"], + "default": "prefer", + "order": 0 + } + } + }, + { + "title": "require", + "additionalProperties": false, + "description": "Require SSL mode.", + "required": ["mode"], + "properties": { + "mode": { + "type": "string", + "const": "require", + "enum": ["require"], + "default": "require", + "order": 0 + } + } + }, + { + "title": "verify-ca", + "additionalProperties": false, + "description": "Verify-ca SSL mode.", + "required": ["mode", "ca_certificate"], + "properties": { + "mode": { + "type": "string", + "const": "verify-ca", + "enum": ["verify-ca"], + "default": "verify-ca", + "order": 0 + }, + "ca_certificate": { + "type": "string", + "title": "CA certificate", + "description": "CA certificate", + "airbyte_secret": true, + "multiline": true, + "order": 1 + }, + "client_key_password": { + "type": "string", + "title": "Client key password (Optional)", + "description": "Password for keystorage. This field is optional. If you do not add it, the password will be generated automatically.", + "airbyte_secret": true, + "order": 4 + } + } + }, + { + "title": "verify-full", + "additionalProperties": false, + "description": "Verify-full SSL mode.", + "required": [ + "mode", + "ca_certificate", + "client_certificate", + "client_key" + ], + "properties": { + "mode": { + "type": "string", + "const": "verify-full", + "enum": ["verify-full"], + "default": "verify-full", + "order": 0 + }, + "ca_certificate": { + "type": "string", + "title": "CA certificate", + "description": "CA certificate", + "airbyte_secret": true, + "multiline": true, + "order": 1 + }, + "client_certificate": { + "type": "string", + "title": "Client certificate", + "description": "Client certificate", + "airbyte_secret": true, + "multiline": true, + "order": 2 + }, + "client_key": { + "type": "string", + "title": "Client key", + "description": "Client key", + "airbyte_secret": true, + "multiline": true, + "order": 3 + }, + "client_key_password": { + "type": "string", + "title": "Client key password (Optional)", + "description": "Password for keystorage. This field is optional. If you do not add it, the password will be generated automatically.", + "airbyte_secret": true, + "order": 4 + } + } + } + ] + }, + "jdbc_url_params": { + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. 
(example: key1=value1&key2=value2&key3=value3).", + "title": "JDBC URL Params", + "type": "string", + "order": 8 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-pubsub.json b/jvm/src/main/resources/airbyte/destination-pubsub.json new file mode 100644 index 0000000..82bd13c --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-pubsub.json @@ -0,0 +1,32 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/pubsub", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Google PubSub Destination Spec", + "type": "object", + "required": ["project_id", "topic_id", "credentials_json"], + "additionalProperties": true, + "properties": { + "project_id": { + "type": "string", + "description": "The GCP project ID for the project containing the target PubSub.", + "title": "Project ID" + }, + "topic_id": { + "type": "string", + "description": "The PubSub topic ID in the given GCP project ID.", + "title": "PubSub Topic ID" + }, + "credentials_json": { + "type": "string", + "description": "The contents of the JSON service account key. Check out the docs if you need help generating this key.", + "title": "Credentials JSON", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-pulsar.json b/jvm/src/main/resources/airbyte/destination-pulsar.json new file mode 100644 index 0000000..7dc40a0 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-pulsar.json @@ -0,0 +1,137 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/pulsar", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Pulsar Destination Spec", + "type": "object", + "required": [ + "brokers", + "use_tls", + "topic_type", + "topic_tenant", + "topic_namespace", + "topic_pattern", + "compression_type", + "send_timeout_ms", + "max_pending_messages", + "max_pending_messages_across_partitions", + "batching_enabled", + "batching_max_messages", + "batching_max_publish_delay", + "block_if_queue_full" + ], + "additionalProperties": true, + "properties": { + "brokers": { + "title": "Pulsar brokers", + "description": "A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.", + "type": "string", + "examples": ["broker1:6650,broker2:6650"] + }, + "use_tls": { + "title": "Use TLS", + "description": "Whether to use TLS encryption on the connection.", + "type": "boolean", + "default": false + }, + "topic_type": { + "title": "Topic type", + "description": "It identifies type of topic. Pulsar supports two kind of topics: persistent and non-persistent. In persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas non-persistent topic does not persist message into storage disk.", + "type": "string", + "default": "persistent", + "enum": ["persistent", "non-persistent"] + }, + "topic_tenant": { + "title": "Topic tenant", + "description": "The topic tenant within the instance. 
Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.", + "type": "string", + "default": "public", + "examples": ["public"] + }, + "topic_namespace": { + "title": "Topic namespace", + "description": "The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.", + "type": "string", + "default": "default", + "examples": ["default"] + }, + "topic_pattern": { + "title": "Topic pattern", + "description": "Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.", + "type": "string", + "examples": ["sample.topic", "{namespace}.{stream}.sample"] + }, + "topic_test": { + "title": "Test topic", + "description": "Topic to test if Airbyte can produce messages.", + "type": "string", + "examples": ["test.topic"] + }, + "producer_name": { + "title": "Producer name", + "description": "Name for the producer. If not filled, the system will generate a globally unique name which can be accessed with.", + "type": "string", + "examples": ["airbyte-producer"] + }, + "producer_sync": { + "title": "Sync producer", + "description": "Wait synchronously until the record has been sent to Pulsar.", + "type": "boolean", + "default": false + }, + "compression_type": { + "title": "Compression type", + "description": "Compression type for the producer.", + "type": "string", + "default": "NONE", + "enum": ["NONE", "LZ4", "ZLIB", "ZSTD", "SNAPPY"] + }, + "send_timeout_ms": { + "title": "Message send timeout", + "description": "If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).", + "type": "integer", + "default": 30000 + }, + "max_pending_messages": { + "title": "Max pending messages", + "description": "The maximum size of a queue holding pending messages.", + "type": "integer", + "default": 1000 + }, + "max_pending_messages_across_partitions": { + "title": "Max pending messages across partitions", + "description": "The maximum number of pending messages across partitions.", + "type": "integer", + "default": 50000 + }, + "batching_enabled": { + "title": "Enable batching", + "description": "Control whether automatic batching of messages is enabled for the producer.", + "type": "boolean", + "default": true + }, + "batching_max_messages": { + "title": "Batching max messages", + "description": "Maximum number of messages permitted in a batch.", + "type": "integer", + "default": 1000 + }, + "batching_max_publish_delay": { + "title": "Batching max publish delay", + "description": " Time period in milliseconds within which the messages sent will be batched.", + "type": "integer", + "default": 1 + }, + "block_if_queue_full": { + "title": "Block if queue is full", + "description": "If the send operation should block when the outgoing message queue is full.", + "type": "boolean", + "default": false + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-rabbitmq.json b/jvm/src/main/resources/airbyte/destination-rabbitmq.json new file mode 100644 index 0000000..212fbcd --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-rabbitmq.json @@ -0,0 +1,49 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/rabbitmq", + "supported_destination_sync_modes": ["append"], + "supportsIncremental": 
true, + "supportsDBT": false, + "supportsNormalization": false, + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Destination Rabbitmq", + "type": "object", + "required": ["host", "routing_key"], + "additionalProperties": false, + "properties": { + "ssl": { + "type": "boolean", + "description": "SSL enabled.", + "default": true + }, + "host": { + "type": "string", + "description": "The RabbitMQ host name." + }, + "port": { + "type": "integer", + "description": "The RabbitMQ port." + }, + "virtual_host": { + "type": "string", + "description": "The RabbitMQ virtual host name." + }, + "username": { + "type": "string", + "description": "The username to connect." + }, + "password": { + "type": "string", + "description": "The password to connect." + }, + "exchange": { + "type": "string", + "description": "The exchange name." + }, + "routing_key": { + "type": "string", + "description": "The routing key." + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-redis.json b/jvm/src/main/resources/airbyte/destination-redis.json new file mode 100644 index 0000000..ef2de6b --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-redis.json @@ -0,0 +1,53 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/redis", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Redis Destination Spec", + "type": "object", + "required": ["host", "port", "username", "password", "cache_type"], + "additionalProperties": false, + "properties": { + "host": { + "title": "Host", + "description": "Redis host to connect to.", + "type": "string", + "examples": ["localhost,127.0.0.1"], + "order": 1 + }, + "port": { + "title": "Port", + "description": "Port of Redis.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 6379, + "order": 2 + }, + "username": { + "title": "Username", + "description": "Username associated with Redis.", + "type": "string", + "order": 3 + }, + "password": { + "title": "Password", + "description": "Password associated with Redis.", + "type": "string", + "airbyte_secret": true, + "order": 4 + }, + "cache_type": { + "title": "Cache type", + "type": "string", + "default": "hash", + "description": "Redis cache type to store data in.", + "enum": ["hash"], + "order": 5 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-redshift.json b/jvm/src/main/resources/airbyte/destination-redshift.json new file mode 100644 index 0000000..dd724d1 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-redshift.json @@ -0,0 +1,216 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/redshift", + "supportsIncremental": true, + "supportsNormalization": true, + "supportsDBT": true, + "supported_destination_sync_modes": ["overwrite", "append", "append_dedup"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Redshift Destination Spec", + "type": "object", + "required": ["host", "port", "database", "username", "password", "schema"], + "additionalProperties": true, + "properties": { + "host": { + "description": "Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)", + "type": "string", + "title": "Host", + "order": 1 + }, + "port": { + "description": "Port of 
the database.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 5439, + "examples": ["5439"], + "title": "Port", + "order": 2 + }, + "username": { + "description": "Username to use to access the database.", + "type": "string", + "title": "Username", + "order": 3 + }, + "password": { + "description": "Password associated with the username.", + "type": "string", + "airbyte_secret": true, + "title": "Password", + "order": 4 + }, + "database": { + "description": "Name of the database.", + "type": "string", + "title": "Database", + "order": 5 + }, + "schema": { + "description": "The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is \"public\".", + "type": "string", + "examples": ["public"], + "default": "public", + "title": "Default Schema", + "order": 6 + }, + "jdbc_url_params": { + "title": "JDBC URL Params", + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).", + "type": "string", + "order": 7 + }, + "uploading_method": { + "title": "Uploading Method", + "type": "object", + "description": "The method how the data will be uploaded to the database.", + "order": 8, + "oneOf": [ + { + "title": "Standard", + "required": ["method"], + "properties": { + "method": { + "type": "string", + "const": "Standard" + } + } + }, + { + "title": "S3 Staging", + "required": [ + "method", + "s3_bucket_name", + "s3_bucket_region", + "access_key_id", + "secret_access_key" + ], + "properties": { + "method": { + "type": "string", + "const": "S3 Staging" + }, + "s3_bucket_name": { + "title": "S3 Bucket Name", + "type": "string", + "description": "The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.", + "examples": ["airbyte.staging"] + }, + "s3_bucket_path": { + "title": "S3 Bucket Path (Optional)", + "type": "string", + "description": "The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.", + "examples": ["data_sync/test"] + }, + "s3_bucket_region": { + "title": "S3 Bucket Region", + "type": "string", + "default": "", + "description": "The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.", + "enum": [ + "", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "cn-north-1", + "cn-northwest-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "sa-east-1", + "me-south-1" + ] + }, + "file_name_pattern": { + "type": "string", + "description": "The pattern allows you to set the file-name format for the S3 staging file(s)", + "title": "S3 Filename pattern (Optional)", + "examples": [ + "{date}", + "{date:yyyy_MM}", + "{timestamp}", + "{part_number}", + "{sync_id}" + ], + "order": 8 + }, + "access_key_id": { + "type": "string", + "description": "This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. 
See AWS docs on how to generate an access key ID and secret access key.", + "title": "S3 Key Id", + "airbyte_secret": true + }, + "secret_access_key": { + "type": "string", + "description": "The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.", + "title": "S3 Access Key", + "airbyte_secret": true + }, + "purge_staging_data": { + "title": "Purge Staging Files and Tables (Optional)", + "type": "boolean", + "description": "Whether to delete the staging files from S3 after completing the sync. See docs for details.", + "default": true + }, + "encryption": { + "title": "Encryption", + "type": "object", + "description": "How to encrypt the staging data", + "default": { "encryption_type": "none" }, + "oneOf": [ + { + "title": "No encryption", + "description": "Staging data will be stored in plaintext.", + "type": "object", + "required": ["encryption_type"], + "properties": { + "encryption_type": { + "type": "string", + "const": "none", + "enum": ["none"], + "default": "none" + } + } + }, + { + "title": "AES-CBC envelope encryption", + "description": "Staging data will be encrypted using AES-CBC envelope encryption.", + "type": "object", + "required": ["encryption_type"], + "properties": { + "encryption_type": { + "type": "string", + "const": "aes_cbc_envelope", + "enum": ["aes_cbc_envelope"], + "default": "aes_cbc_envelope" + }, + "key_encrypting_key": { + "type": "string", + "title": "Key", + "description": "The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.", + "airbyte_secret": true + } + } + } + ] + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-rockset.json b/jvm/src/main/resources/airbyte/destination-rockset.json new file mode 100644 index 0000000..7eedb8e --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-rockset.json @@ -0,0 +1,39 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/rockset", + "supportsIncremental": true, + "supported_destination_sync_modes": ["append", "overwrite"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Rockset Destination Spec", + "type": "object", + "required": ["api_key", "workspace"], + "additionalProperties": false, + "properties": { + "api_key": { + "title": "Api Key", + "description": "Rockset api key", + "type": "string", + "order": 0, + "airbyte_secret": true + }, + "workspace": { + "title": "Workspace", + "description": "The Rockset workspace in which collections will be created + written to.", + "type": "string", + "examples": ["commons", "my_workspace"], + "default": "commons", + "airbyte_secret": false, + "order": 1 + }, + "api_server": { + "title": "Api Server", + "description": "Rockset api URL", + "type": "string", + "airbyte_secret": false, + "default": "https://api.rs2.usw2.rockset.com", + "pattern": "^https:\\/\\/.*.rockset.com$", + "order": 2 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-s3.json b/jvm/src/main/resources/airbyte/destination-s3.json new file mode 100644 index 0000000..db4022e --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-s3.json @@ -0,0 +1,390 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/s3", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + 
"$schema": "http://json-schema.org/draft-07/schema#", + "title": "S3 Destination Spec", + "type": "object", + "required": [ + "s3_bucket_name", + "s3_bucket_path", + "s3_bucket_region", + "format" + ], + "properties": { + "access_key_id": { + "type": "string", + "description": "The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.", + "title": "S3 Key ID *", + "airbyte_secret": true, + "examples": ["A012345678910EXAMPLE"], + "order": 0 + }, + "secret_access_key": { + "type": "string", + "description": "The corresponding secret to the access key ID. Read more here", + "title": "S3 Access Key *", + "airbyte_secret": true, + "examples": ["a012345678910ABCDEFGH/AbCdEfGhEXAMPLEKEY"], + "order": 1 + }, + "s3_bucket_name": { + "title": "S3 Bucket Name", + "type": "string", + "description": "The name of the S3 bucket. Read more here.", + "examples": ["airbyte_sync"], + "order": 2 + }, + "s3_bucket_path": { + "title": "S3 Bucket Path", + "description": "Directory under the S3 bucket where data will be written. Read more here", + "type": "string", + "examples": ["data_sync/test"], + "order": 3 + }, + "s3_bucket_region": { + "title": "S3 Bucket Region", + "type": "string", + "default": "", + "description": "The region of the S3 bucket. See here for all region codes.", + "enum": [ + "", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "cn-north-1", + "cn-northwest-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "sa-east-1", + "me-south-1", + "us-gov-east-1", + "us-gov-west-1" + ], + "order": 4 + }, + "format": { + "title": "Output Format *", + "type": "object", + "description": "Format of the data output. See here for more details", + "oneOf": [ + { + "title": "Avro: Apache Avro", + "required": ["format_type", "compression_codec"], + "properties": { + "format_type": { + "title": "Format Type *", + "type": "string", + "enum": ["Avro"], + "default": "Avro", + "order": 0 + }, + "compression_codec": { + "title": "Compression Codec *", + "description": "The compression algorithm used to compress data. 
Default to no compression.", + "type": "object", + "oneOf": [ + { + "title": "No Compression", + "required": ["codec"], + "properties": { + "codec": { + "type": "string", + "enum": ["no compression"], + "default": "no compression" + } + } + }, + { + "title": "Deflate", + "required": ["codec", "compression_level"], + "properties": { + "codec": { + "type": "string", + "enum": ["Deflate"], + "default": "Deflate" + }, + "compression_level": { + "title": "Deflate Level", + "description": "0: no compression & fastest, 9: best compression & slowest.", + "type": "integer", + "default": 0, + "minimum": 0, + "maximum": 9 + } + } + }, + { + "title": "bzip2", + "required": ["codec"], + "properties": { + "codec": { + "type": "string", + "enum": ["bzip2"], + "default": "bzip2" + } + } + }, + { + "title": "xz", + "required": ["codec", "compression_level"], + "properties": { + "codec": { + "type": "string", + "enum": ["xz"], + "default": "xz" + }, + "compression_level": { + "title": "Compression Level", + "description": "See here for details.", + "type": "integer", + "default": 6, + "minimum": 0, + "maximum": 9 + } + } + }, + { + "title": "zstandard", + "required": ["codec", "compression_level"], + "properties": { + "codec": { + "type": "string", + "enum": ["zstandard"], + "default": "zstandard" + }, + "compression_level": { + "title": "Compression Level", + "description": "Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.", + "type": "integer", + "default": 3, + "minimum": -5, + "maximum": 22 + }, + "include_checksum": { + "title": "Include Checksum", + "description": "If true, include a checksum with each data block.", + "type": "boolean", + "default": false + } + } + }, + { + "title": "snappy", + "required": ["codec"], + "properties": { + "codec": { + "type": "string", + "enum": ["snappy"], + "default": "snappy" + } + } + } + ], + "order": 1 + } + } + }, + { + "title": "CSV: Comma-Separated Values", + "required": ["format_type", "flattening"], + "properties": { + "format_type": { + "title": "Format Type *", + "type": "string", + "enum": ["CSV"], + "default": "CSV" + }, + "flattening": { + "type": "string", + "title": "Normalization (Flattening)", + "description": "Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.", + "default": "No flattening", + "enum": ["No flattening", "Root level flattening"] + }, + "compression": { + "title": "Compression", + "type": "object", + "description": "Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: \".csv.gz\").", + "oneOf": [ + { + "title": "No Compression", + "requires": ["compression_type"], + "properties": { + "compression_type": { + "type": "string", + "enum": ["No Compression"], + "default": "No Compression" + } + } + }, + { + "title": "GZIP", + "requires": ["compression_type"], + "properties": { + "compression_type": { + "type": "string", + "enum": ["GZIP"], + "default": "GZIP" + } + } + } + ] + } + } + }, + { + "title": "JSON Lines: Newline-delimited JSON", + "required": ["format_type"], + "properties": { + "format_type": { + "title": "Format Type *", + "type": "string", + "enum": ["JSONL"], + "default": "JSONL" + }, + "compression": { + "title": "Compression", + "type": "object", + "description": "Whether the output files should be compressed. 
If compression is selected, the output filename will have an extra extension (GZIP: \".jsonl.gz\").", + "oneOf": [ + { + "title": "No Compression", + "requires": "compression_type", + "properties": { + "compression_type": { + "type": "string", + "enum": ["No Compression"], + "default": "No Compression" + } + } + }, + { + "title": "GZIP", + "requires": "compression_type", + "properties": { + "compression_type": { + "type": "string", + "enum": ["GZIP"], + "default": "GZIP" + } + } + } + ] + } + } + }, + { + "title": "Parquet: Columnar Storage", + "required": ["format_type"], + "properties": { + "format_type": { + "title": "Format Type *", + "type": "string", + "enum": ["Parquet"], + "default": "Parquet" + }, + "compression_codec": { + "title": "Compression Codec (Optional)", + "description": "The compression algorithm used to compress data pages.", + "type": "string", + "enum": [ + "UNCOMPRESSED", + "SNAPPY", + "GZIP", + "LZO", + "BROTLI", + "LZ4", + "ZSTD" + ], + "default": "UNCOMPRESSED" + }, + "block_size_mb": { + "title": "Block Size (Row Group Size) (MB) (Optional)", + "description": "This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.", + "type": "integer", + "default": 128, + "examples": [128] + }, + "max_padding_size_mb": { + "title": "Max Padding Size (MB) (Optional)", + "description": "Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.", + "type": "integer", + "default": 8, + "examples": [8] + }, + "page_size_kb": { + "title": "Page Size (KB) (Optional)", + "description": "The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.", + "type": "integer", + "default": 1024, + "examples": [1024] + }, + "dictionary_page_size_kb": { + "title": "Dictionary Page Size (KB) (Optional)", + "description": "There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.", + "type": "integer", + "default": 1024, + "examples": [1024] + }, + "dictionary_encoding": { + "title": "Dictionary Encoding (Optional)", + "description": "Default: true.", + "type": "boolean", + "default": true + } + } + } + ], + "order": 5 + }, + "s3_endpoint": { + "title": "Endpoint (Optional)", + "type": "string", + "default": "", + "description": "Your S3 endpoint url. Read more here", + "examples": ["http://localhost:9000"], + "order": 6 + }, + "s3_path_format": { + "title": "S3 Path Format (Optional)", + "description": "Format string on how data will be organized inside the S3 bucket directory. 
Read more here", + "type": "string", + "examples": [ + "${NAMESPACE}/${STREAM_NAME}/${YEAR}_${MONTH}_${DAY}_${EPOCH}_" + ], + "order": 7 + }, + "file_name_pattern": { + "type": "string", + "description": "The pattern allows you to set the file-name format for the S3 staging file(s)", + "title": "S3 Filename pattern (Optional)", + "examples": [ + "{date}", + "{date:yyyy_MM}", + "{timestamp}", + "{part_number}", + "{sync_id}" + ], + "order": 8 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-scylla.json b/jvm/src/main/resources/airbyte/destination-scylla.json new file mode 100644 index 0000000..6fbed67 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-scylla.json @@ -0,0 +1,57 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/scylla", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Scylla Destination Spec", + "type": "object", + "required": ["keyspace", "username", "password", "address", "port"], + "additionalProperties": true, + "properties": { + "keyspace": { + "title": "Keyspace", + "description": "Default Scylla keyspace to create data in.", + "type": "string", + "order": 0 + }, + "username": { + "title": "Username", + "description": "Username to use to access Scylla.", + "type": "string", + "order": 1 + }, + "password": { + "title": "Password", + "description": "Password associated with Scylla.", + "type": "string", + "airbyte_secret": true, + "order": 2 + }, + "address": { + "title": "Address", + "description": "Address to connect to.", + "type": "string", + "order": 3 + }, + "port": { + "title": "Port", + "description": "Port of Scylla.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 9042, + "order": 4 + }, + "replication": { + "title": "Replication factor", + "type": "integer", + "description": "Indicates to how many nodes the data should be replicated to.", + "default": 1, + "order": 5 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-sftp-json.json b/jvm/src/main/resources/airbyte/destination-sftp-json.json new file mode 100644 index 0000000..920997d --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-sftp-json.json @@ -0,0 +1,52 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/sftp-json", + "supported_destination_sync_modes": ["overwrite", "append"], + "supportsIncremental": true, + "supportsDBT": false, + "supportsNormalization": false, + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Destination SFTP JSON", + "type": "object", + "required": ["host", "username", "password", "destination_path"], + "additionalProperties": false, + "properties": { + "host": { + "title": "Host", + "description": "Hostname of the SFTP server.", + "type": "string", + "order": 0 + }, + "port": { + "title": "Port", + "description": "Port of the SFTP server.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 22, + "examples": [22], + "order": 1 + }, + "username": { + "title": "User", + "description": "Username to use to access the SFTP server.", + "type": "string", + "order": 2 + }, + "password": { + "title": "Password", + "description": "Password associated with the username.", + "type": "string", + "airbyte_secret": true, + "order": 3 + }, + "destination_path": { + "title": "Destination 
path", + "type": "string", + "description": "Path to the directory where json files will be written.", + "examples": ["/json_data"], + "order": 4 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-snowflake.json b/jvm/src/main/resources/airbyte/destination-snowflake.json new file mode 100644 index 0000000..e834527 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-snowflake.json @@ -0,0 +1,477 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/snowflake", + "supportsIncremental": true, + "supportsNormalization": true, + "supportsDBT": true, + "supported_destination_sync_modes": ["overwrite", "append", "append_dedup"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Snowflake Destination Spec", + "type": "object", + "required": ["host", "role", "warehouse", "database", "schema", "username"], + "additionalProperties": true, + "properties": { + "host": { + "description": "Enter your Snowflake account's locator (in the format ...snowflakecomputing.com)", + "examples": [ + "accountname.us-east-2.aws.snowflakecomputing.com", + "accountname.snowflakecomputing.com" + ], + "type": "string", + "title": "Host", + "order": 0 + }, + "role": { + "description": "Enter the role that you want to use to access Snowflake", + "examples": ["AIRBYTE_ROLE"], + "type": "string", + "title": "Role", + "order": 1 + }, + "warehouse": { + "description": "Enter the name of the warehouse that you want to sync data into", + "examples": ["AIRBYTE_WAREHOUSE"], + "type": "string", + "title": "Warehouse", + "order": 2 + }, + "database": { + "description": "Enter the name of the database you want to sync data into", + "examples": ["AIRBYTE_DATABASE"], + "type": "string", + "title": "Database", + "order": 3 + }, + "schema": { + "description": "Enter the name of the default schema", + "examples": ["AIRBYTE_SCHEMA"], + "type": "string", + "title": "Default Schema", + "order": 4 + }, + "username": { + "description": "Enter the name of the user you want to use to access the database", + "examples": ["AIRBYTE_USER"], + "type": "string", + "title": "Username", + "order": 5 + }, + "credentials": { + "title": "Authorization Method", + "description": "", + "type": "object", + "oneOf": [ + { + "title": "OAuth2.0", + "type": "object", + "order": 0, + "required": ["access_token", "refresh_token"], + "properties": { + "auth_type": { + "type": "string", + "const": "OAuth2.0", + "enum": ["OAuth2.0"], + "default": "OAuth2.0", + "order": 0 + }, + "client_id": { + "type": "string", + "title": "Client ID", + "description": "Enter your application's Client ID", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "Enter your application's Client secret", + "airbyte_secret": true + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "Enter you application's Access Token", + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "title": "Refresh Token", + "description": "Enter your application's Refresh Token", + "airbyte_secret": true + } + } + }, + { + "title": "Key Pair Authentication", + "type": "object", + "order": 1, + "required": ["private_key"], + "properties": { + "auth_type": { + "type": "string", + "const": "Key Pair Authentication", + "enum": ["Key Pair Authentication"], + "default": "Key Pair Authentication", + "order": 0 + }, + "private_key": { + "type": "string", + "title": "Private Key", + "description": "RSA 
Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.", + "multiline": true, + "airbyte_secret": true + }, + "private_key_password": { + "type": "string", + "title": "Passphrase (Optional)", + "description": "Passphrase for private key", + "airbyte_secret": true + } + } + }, + { + "title": "Username and Password", + "type": "object", + "required": ["password"], + "order": 2, + "properties": { + "password": { + "description": "Enter the password associated with the username.", + "type": "string", + "airbyte_secret": true, + "title": "Password", + "order": 1 + } + } + } + ], + "order": 6 + }, + "jdbc_url_params": { + "description": "Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &). Example: key1=value1&key2=value2&key3=value3", + "title": "JDBC URL Params", + "type": "string", + "order": 7 + }, + "loading_method": { + "type": "object", + "title": "Data Staging Method", + "description": "Select a data staging method", + "order": 8, + "oneOf": [ + { + "title": "Select another option", + "description": "Select another option", + "required": ["method"], + "properties": { + "method": { + "title": "", + "description": "", + "type": "string", + "enum": ["Standard"], + "default": "Standard" + } + } + }, + { + "title": "[Recommended] Internal Staging", + "description": "Recommended for large production workloads for better speed and scalability.", + "required": ["method"], + "properties": { + "method": { + "title": "", + "description": "", + "type": "string", + "enum": ["Internal Staging"], + "default": "Internal Staging" + } + } + }, + { + "title": "AWS S3 Staging", + "description": "Recommended for large production workloads for better speed and scalability.", + "required": [ + "method", + "s3_bucket_name", + "access_key_id", + "secret_access_key" + ], + "properties": { + "method": { + "title": "", + "description": "", + "type": "string", + "enum": ["S3 Staging"], + "default": "S3 Staging", + "order": 0 + }, + "s3_bucket_name": { + "title": "S3 Bucket Name", + "type": "string", + "description": "Enter your S3 bucket name", + "examples": ["airbyte.staging"], + "order": 1 + }, + "s3_bucket_region": { + "title": "S3 Bucket Region", + "type": "string", + "default": "", + "description": "Enter the region where your S3 bucket resides", + "enum": [ + "", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "cn-north-1", + "cn-northwest-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "eu-south-1", + "eu-north-1", + "sa-east-1", + "me-south-1" + ], + "order": 2 + }, + "access_key_id": { + "type": "string", + "description": "Enter your AWS access key ID. 
Airbyte requires Read and Write permissions on your S3 bucket ", + "title": "AWS access key ID", + "airbyte_secret": true, + "order": 3 + }, + "secret_access_key": { + "type": "string", + "description": "Enter your AWS secret access key", + "title": "AWS secret access key", + "airbyte_secret": true, + "order": 4 + }, + "purge_staging_data": { + "title": "Purge Staging Files and Tables", + "type": "boolean", + "description": "Toggle to delete staging files from the S3 bucket after a successful sync", + "default": true, + "order": 5 + }, + "encryption": { + "title": "Encryption", + "type": "object", + "description": "Choose a data encryption method for the staging data", + "default": { "encryption_type": "none" }, + "order": 6, + "oneOf": [ + { + "title": "No encryption", + "description": "Staging data will be stored in plaintext.", + "type": "object", + "required": ["encryption_type"], + "properties": { + "encryption_type": { + "type": "string", + "const": "none", + "enum": ["none"], + "default": "none" + } + } + }, + { + "title": "AES-CBC envelope encryption", + "description": "Staging data will be encrypted using AES-CBC envelope encryption.", + "type": "object", + "required": ["encryption_type"], + "properties": { + "encryption_type": { + "type": "string", + "const": "aes_cbc_envelope", + "enum": ["aes_cbc_envelope"], + "default": "aes_cbc_envelope" + }, + "key_encrypting_key": { + "type": "string", + "title": "Key", + "description": "The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.", + "airbyte_secret": true + } + } + } + ] + }, + "file_name_pattern": { + "type": "string", + "description": "The pattern allows you to set the file-name format for the S3 staging file(s)", + "title": "S3 Filename pattern (Optional)", + "examples": [ + "{date}", + "{date:yyyy_MM}", + "{timestamp}", + "{part_number}", + "{sync_id}" + ], + "order": 7 + } + } + }, + { + "title": "Google Cloud Storage Staging", + "description": "Recommended for large production workloads for better speed and scalability.", + "required": [ + "method", + "project_id", + "bucket_name", + "credentials_json" + ], + "properties": { + "method": { + "title": "", + "description": "", + "type": "string", + "enum": ["GCS Staging"], + "default": "GCS Staging", + "order": 0 + }, + "project_id": { + "title": "Google Cloud project ID", + "type": "string", + "description": "Enter the Google Cloud project ID", + "examples": ["my-project"], + "order": 1 + }, + "bucket_name": { + "title": "Cloud Storage bucket name", + "type": "string", + "description": "Enter the Cloud Storage bucket name", + "examples": ["airbyte-staging"], + "order": 2 + }, + "credentials_json": { + "title": "Google Application Credentials", + "type": "string", + "description": "Enter your Google Cloud service account key in the JSON format with read/write access to your Cloud Storage staging bucket", + "airbyte_secret": true, + "multiline": true, + "order": 3 + } + } + }, + { + "title": "Azure Blob Storage Staging", + "description": "Recommended for large production workloads for better speed and scalability.", + "required": [ + "method", + "azure_blob_storage_account_name", + "azure_blob_storage_container_name", + "azure_blob_storage_sas_token" + ], + "properties": { + "method": { + "title": "", + "description": "", + "type": "string", + "enum": ["Azure Blob Staging"], + "default": "Azure Blob Staging", + "order": 0 + }, + "azure_blob_storage_endpoint_domain_name": { + "title": "Azure Blob 
Storage Endpoint", + "type": "string", + "default": "blob.core.windows.net", + "description": "Enter the Azure Blob Storage endpoint domain name", + "examples": ["blob.core.windows.net"], + "order": 1 + }, + "azure_blob_storage_account_name": { + "title": "Azure Blob Storage account name", + "type": "string", + "description": "Enter your Azure Blob Storage account name", + "examples": ["airbyte5storage"], + "order": 2 + }, + "azure_blob_storage_container_name": { + "title": "Azure Blob Storage Container Name", + "type": "string", + "description": "Enter your Azure Blob Storage container name", + "examples": ["airbytetestcontainername"], + "order": 3 + }, + "azure_blob_storage_sas_token": { + "title": "SAS Token", + "type": "string", + "airbyte_secret": true, + "description": "Enter the Shared access signature (SAS) token to grant Snowflake limited access to objects in your Azure Blob Storage account", + "examples": [ + "?sv=2016-05-31&ss=b&srt=sco&sp=rwdl&se=2018-06-27T10:05:50Z&st=2017-06-27T02:05:50Z&spr=https,http&sig=bgqQwoXwxzuD2GJfagRg7VOS8hzNr3QLT7rhS8OFRLQ%3D" + ], + "order": 4 + } + } + } + ] + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "auth_type"], + "predicate_value": "OAuth2.0", + "oauth_config_specification": { + "oauth_user_input_from_connector_config_specification": { + "type": "object", + "properties": { + "host": { + "type": "string", + "path_in_connector_config": ["host"] + } + } + }, + "complete_oauth_output_specification": { + "type": "object", + "properties": { + "access_token": { + "type": "string", + "path_in_connector_config": ["credentials", "access_token"] + }, + "refresh_token": { + "type": "string", + "path_in_connector_config": ["credentials", "refresh_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-sqlite.json b/jvm/src/main/resources/airbyte/destination-sqlite.json new file mode 100644 index 0000000..5d7f145 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-sqlite.json @@ -0,0 +1,21 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/sqlite", + "supported_destination_sync_modes": ["overwrite", "append"], + "supportsIncremental": true, + "supportsDBT": false, + "supportsNormalization": false, + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Destination Sqlite", + "type": "object", + "required": ["destination_path"], + "additionalProperties": false, + "properties": { + "destination_path": { + "type": "string", + "description": "Path to the sqlite.db file. The file will be placed inside that local mount. 
For more information check out our docs", + "example": "/local/sqlite.db" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/destination-tidb.json b/jvm/src/main/resources/airbyte/destination-tidb.json new file mode 100644 index 0000000..d1fade3 --- /dev/null +++ b/jvm/src/main/resources/airbyte/destination-tidb.json @@ -0,0 +1,65 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/tidb", + "supportsIncremental": true, + "supportsNormalization": true, + "supportsDBT": true, + "supported_destination_sync_modes": ["overwrite", "append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "TiDB Destination Spec", + "type": "object", + "required": ["host", "port", "username", "database"], + "additionalProperties": true, + "properties": { + "host": { + "title": "Host", + "description": "Hostname of the database.", + "type": "string", + "order": 0 + }, + "port": { + "title": "Port", + "description": "Port of the database.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 4000, + "examples": ["4000"], + "order": 1 + }, + "database": { + "title": "Database", + "description": "Name of the database.", + "type": "string", + "order": 2 + }, + "username": { + "title": "User", + "description": "Username to use to access the database.", + "type": "string", + "order": 3 + }, + "password": { + "title": "Password", + "description": "Password associated with the username.", + "type": "string", + "airbyte_secret": true, + "default": "", + "order": 4 + }, + "ssl": { + "title": "SSL Connection", + "description": "Encrypt data using SSL.", + "type": "boolean", + "default": false, + "order": 5 + }, + "jdbc_url_params": { + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).", + "title": "JDBC URL Params", + "type": "string", + "order": 6 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/messages_airbyte_en b/jvm/src/main/resources/airbyte/messages_airbyte_en new file mode 100644 index 0000000..da1886a --- /dev/null +++ b/jvm/src/main/resources/airbyte/messages_airbyte_en @@ -0,0 +1,40465 @@ +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. 
This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically haranacontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting allow one to access the above S3 staging bucket. harana requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Default to 'true'. Switch it to 'false' for debugging purpose. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. harana requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint url.(if you are working with AWS DynamoDB, just leave empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc ID. Does not support composite primary keys.
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to log in to. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target Firestore database.
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path 
+datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is a waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for the dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database.
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if harana can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab in the left-hand side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow the connector to guess the keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab in the left-hand side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for the current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see the documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if harana can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "harana". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default harana message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer. +datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages. +datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions. +datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name. +datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar. +datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by the server before the send timeout (in ms) expires, an error occurs. +datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces. +datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and are spread across clusters. +datasources.section.destination-pulsar.topic_test.description=Topic to test if harana can produce messages. +datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk. +datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-rabbitmq.exchange.description=The exchange name. +datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name. +datasources.section.destination-rabbitmq.password.description=The password to connect. +datasources.section.destination-rabbitmq.port.description=The RabbitMQ port. +datasources.section.destination-rabbitmq.routing_key.description=The routing key. +datasources.section.destination-rabbitmq.ssl.description=SSL enabled. +datasources.section.destination-rabbitmq.username.description=The username to connect. +datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name. +datasources.section.destination-redis.cache_type.title=Cache type +datasources.section.destination-redis.host.title=Host +datasources.section.destination-redis.password.title=Password +datasources.section.destination-redis.port.title=Port +datasources.section.destination-redis.username.title=Username +datasources.section.destination-redis.cache_type.description=Redis cache type to store data in. +datasources.section.destination-redis.host.description=Redis host to connect to. +datasources.section.destination-redis.password.description=Password associated with Redis. +datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis. +datasources.section.destination-redshift.database.title=Database +datasources.section.destination-redshift.host.title=Host +datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-redshift.password.title=Password +datasources.section.destination-redshift.port.title=Port +datasources.section.destination-redshift.schema.title=Default Schema +datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key +datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging +datasources.section.destination-redshift.uploading_method.title=Uploading Method +datasources.section.destination-redshift.username.title=Username +datasources.section.destination-redshift.database.description=Name of the database. +datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com) +datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-redshift.password.description=Password associated with the username. +datasources.section.destination-redshift.port.description=Port of the database. +datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-redshift.uploading_method.description=The method by which the data will be uploaded to the database. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. harana requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have harana generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key ID. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-rockset.api_key.title=API Key +datasources.section.destination-rockset.api_server.title=API Server +datasources.section.destination-rockset.workspace.title=Workspace +datasources.section.destination-rockset.api_key.description=Rockset API key +datasources.section.destination-rockset.api_server.description=Rockset API URL +datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID * +datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec * +datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) +datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) +datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-s3.format.title=Output Format * 
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-s3.s3_endpoint.title=Endpoint +datasources.section.destination-s3.s3_path_format.title=S3 Path Format +datasources.section.destination-s3.secret_access_key.title=S3 Access Key * +datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. harana requires Read and Write permissions to the given bucket. Read more here. +datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-s3.format.description=Format of the data output. See here for more details +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. 
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here. +datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here +datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes. +datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here +datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here +datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here +datasources.section.destination-scylla.address.title=Address +datasources.section.destination-scylla.keyspace.title=Keyspace +datasources.section.destination-scylla.password.title=Password +datasources.section.destination-scylla.port.title=Port +datasources.section.destination-scylla.replication.title=Replication factor +datasources.section.destination-scylla.username.title=Username +datasources.section.destination-scylla.address.description=Address to connect to. +datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in. +datasources.section.destination-scylla.password.description=Password associated with Scylla. +datasources.section.destination-scylla.port.description=Port of Scylla. +datasources.section.destination-scylla.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-scylla.username.description=Username to use to access Scylla. +datasources.section.destination-sftp-json.destination_path.title=Destination path +datasources.section.destination-sftp-json.host.title=Host +datasources.section.destination-sftp-json.password.title=Password +datasources.section.destination-sftp-json.port.title=Port +datasources.section.destination-sftp-json.username.title=User +datasources.section.destination-sftp-json.destination_path.description=Path to the directory where json files will be written. +datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server. +datasources.section.destination-sftp-json.password.description=Password associated with the username. +datasources.section.destination-sftp-json.port.description=Port of the SFTP server. +datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server. 
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase
+datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password
+datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password
+datasources.section.destination-snowflake.credentials.title=Authorization Method
+datasources.section.destination-snowflake.database.title=Database
+datasources.section.destination-snowflake.host.title=Host
+datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option
+datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key
+datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging
+datasources.section.destination-snowflake.loading_method.title=Data Staging Method
+datasources.section.destination-snowflake.role.title=Role
+datasources.section.destination-snowflake.schema.title=Default Schema
+datasources.section.destination-snowflake.username.title=Username
+datasources.section.destination-snowflake.warehouse.title=Warehouse
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username.
+datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into
+datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format ...snowflakecomputing.com)
+datasources.section.destination-snowflake.jdbc_url_params.description=Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &). Example: key1=value1&key2=value2&key3=value3
+datasources.section.destination-snowflake.loading_method.description=Select a data staging method
+datasources.section.destination-snowflake.loading_method.oneOf.0.description=Select another option
+datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description=
+datasources.section.destination-snowflake.loading_method.oneOf.1.description=Recommended for large production workloads for better speed and scalability.
+datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description=
+datasources.section.destination-snowflake.loading_method.oneOf.2.description=Recommended for large production workloads for better speed and scalability.
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.description=Enter your AWS access key ID. harana requires Read and Write permissions on your S3 bucket
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.description=Choose a data encryption method for the staging data
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have harana generate an ephemeral key for each sync.
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description=
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.description=Toggle to delete staging files from the S3 bucket after a successful sync
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.description=Enter your S3 bucket name
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.description=Enter the region where your S3 bucket resides
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.description=Enter your AWS secret access key
+datasources.section.destination-snowflake.loading_method.oneOf.3.description=Recommended for large production workloads for better speed and scalability.
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.description=Enter the Cloud Storage bucket name
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.description=Enter your Google Cloud service account key in the JSON format with read/write access to your Cloud Storage staging bucket
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.description=Enter the Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.4.description=Recommended for large production workloads for better speed and scalability.
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.description=Enter your Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.description=Enter your Azure Blob Storage container name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.description=Enter the Azure Blob Storage endpoint domain name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.description=Enter the Shared access signature (SAS) token to grant Snowflake limited access to objects in your Azure Blob Storage account
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description=
+datasources.section.destination-snowflake.role.description=Enter the role that you want to use to access Snowflake
+datasources.section.destination-snowflake.schema.description=Enter the name of the default schema
+datasources.section.destination-snowflake.username.description=Enter the name of the user you want to use to access the database
+datasources.section.destination-snowflake.warehouse.description=Enter the name of the warehouse that you want to sync data into
+datasources.section.destination-sqlite.destination_path.description=Path to the sqlite.db file. The file will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-tidb.database.title=Database
+datasources.section.destination-tidb.host.title=Host
+datasources.section.destination-tidb.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-tidb.password.title=Password
+datasources.section.destination-tidb.port.title=Port
+datasources.section.destination-tidb.ssl.title=SSL Connection
+datasources.section.destination-tidb.username.title=User
+datasources.section.destination-tidb.database.description=Name of the database.
+datasources.section.destination-tidb.host.description=Hostname of the database.
+datasources.section.destination-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-tidb.password.description=Password associated with the username.
+datasources.section.destination-tidb.port.description=Port of the database.
+datasources.section.destination-tidb.ssl.description=Encrypt data using SSL. +datasources.section.destination-tidb.username.description=Username to use to access the database. +datasources.section.source-airtable.api_key.title=API Key +datasources.section.source-airtable.base_id.title=Base ID +datasources.section.source-airtable.tables.title=Tables +datasources.section.source-airtable.api_key.description=The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key. +datasources.section.source-airtable.base_id.description=The Base ID to integrate the data from. You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs. +datasources.section.source-airtable.tables.description=The list of Tables to integrate. +datasources.section.source-amazon-ads.auth_type.title=Auth Type +datasources.section.source-amazon-ads.client_id.title=Client ID +datasources.section.source-amazon-ads.client_secret.title=Client Secret +datasources.section.source-amazon-ads.profiles.title=Profile IDs +datasources.section.source-amazon-ads.refresh_token.title=Refresh Token +datasources.section.source-amazon-ads.region.title=Region * +datasources.section.source-amazon-ads.report_generation_max_retries.title=Report Generation Maximum Retries * +datasources.section.source-amazon-ads.report_wait_timeout.title=Report Wait Timeout * +datasources.section.source-amazon-ads.start_date.title=Start Date +datasources.section.source-amazon-ads.client_id.description=The client ID of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.client_secret.description=The client secret of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.profiles.description=Profile IDs you want to fetch data for. See docs for more details. +datasources.section.source-amazon-ads.refresh_token.description=Amazon Ads refresh token. See the docs for more information on how to obtain this token. +datasources.section.source-amazon-ads.region.description=Region to pull data from (EU/NA/FE). See docs for more details. +datasources.section.source-amazon-ads.report_generation_max_retries.description=Maximum retries harana will attempt for fetching report data. Default is 5. +datasources.section.source-amazon-ads.report_wait_timeout.description=Timeout duration in minutes for Reports. Default is 30 minutes. +datasources.section.source-amazon-ads.start_date.description=The Start date for collecting reports, should not be more than 60 days in the past. 
In YYYY-MM-DD format +datasources.section.source-amazon-seller-partner.app_id.title=App Id * +datasources.section.source-amazon-seller-partner.auth_type.title=Auth Type +datasources.section.source-amazon-seller-partner.aws_access_key.title=AWS Access Key +datasources.section.source-amazon-seller-partner.aws_environment.title=AWSEnvironment +datasources.section.source-amazon-seller-partner.aws_secret_key.title=AWS Secret Access Key +datasources.section.source-amazon-seller-partner.lwa_app_id.title=LWA Client Id +datasources.section.source-amazon-seller-partner.lwa_client_secret.title=LWA Client Secret +datasources.section.source-amazon-seller-partner.max_wait_seconds.title=Max wait time for reports (in seconds) +datasources.section.source-amazon-seller-partner.period_in_days.title=Period In Days +datasources.section.source-amazon-seller-partner.refresh_token.title=Refresh Token +datasources.section.source-amazon-seller-partner.region.title=AWSRegion +datasources.section.source-amazon-seller-partner.replication_end_date.title=End Date +datasources.section.source-amazon-seller-partner.replication_start_date.title=Start Date +datasources.section.source-amazon-seller-partner.report_options.title=Report Options +datasources.section.source-amazon-seller-partner.role_arn.title=Role ARN +datasources.section.source-amazon-seller-partner.app_id.description=Your Amazon App ID +datasources.section.source-amazon-seller-partner.aws_access_key.description=Specifies the AWS access key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.aws_environment.description=An enumeration. +datasources.section.source-amazon-seller-partner.aws_secret_key.description=Specifies the AWS secret key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.lwa_app_id.description=Your Login with Amazon Client ID. +datasources.section.source-amazon-seller-partner.lwa_client_secret.description=Your Login with Amazon Client Secret. +datasources.section.source-amazon-seller-partner.max_wait_seconds.description=Sometimes report can take up to 30 minutes to generate. This will set the limit for how long to wait for a successful report. +datasources.section.source-amazon-seller-partner.period_in_days.description=Will be used for stream slicing for initial full_refresh sync when no updated state is present for reports that support sliced incremental sync. +datasources.section.source-amazon-seller-partner.refresh_token.description=The Refresh Token obtained via OAuth flow authorization. +datasources.section.source-amazon-seller-partner.region.description=An enumeration. +datasources.section.source-amazon-seller-partner.replication_end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. +datasources.section.source-amazon-seller-partner.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-amazon-seller-partner.report_options.description=Additional information passed to reports. This varies by report type. Must be a valid json string. +datasources.section.source-amazon-seller-partner.role_arn.description=Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations requested using this profile. (Needs permission to 'Assume Role' STS). 
+datasources.section.source-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.source-amazon-sqs.attributes_to_return.title=Message Attributes To Return
+datasources.section.source-amazon-sqs.delete_messages.title=Delete Messages After Read
+datasources.section.source-amazon-sqs.max_batch_size.title=Max Batch Size
+datasources.section.source-amazon-sqs.max_wait_time.title=Max Wait Time
+datasources.section.source-amazon-sqs.queue_url.title=Queue URL
+datasources.section.source-amazon-sqs.region.title=AWS Region
+datasources.section.source-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.source-amazon-sqs.visibility_timeout.title=Message Visibility Timeout
+datasources.section.source-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for pulling messages
+datasources.section.source-amazon-sqs.attributes_to_return.description=Comma-separated list of Message Attribute names to return
+datasources.section.source-amazon-sqs.delete_messages.description=If Enabled, messages will be deleted from the SQS Queue after being read. If Disabled, messages are left in the queue and can be read more than once. WARNING: Enabling this option can result in data loss in cases of failure; use with caution and see the documentation for more detail.
+datasources.section.source-amazon-sqs.max_batch_size.description=Max amount of messages to get in one batch (10 max)
+datasources.section.source-amazon-sqs.max_wait_time.description=Max amount of time in seconds to wait for messages in a single poll (20 max)
+datasources.section.source-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.source-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.source-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for pulling messages
+datasources.section.source-amazon-sqs.visibility_timeout.description=Modify the Visibility Timeout of the individual message from the Queue's default (seconds).
+datasources.section.source-amplitude.api_key.title=API Key
+datasources.section.source-amplitude.secret_key.title=Secret Key
+datasources.section.source-amplitude.start_date.title=Replication Start Date
+datasources.section.source-amplitude.api_key.description=Amplitude API Key. See the setup guide for more information on how to obtain this key.
+datasources.section.source-amplitude.secret_key.description=Amplitude Secret Key. See the setup guide for more information on how to obtain this key.
+datasources.section.source-amplitude.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-apify-dataset.clean.title=Clean
+datasources.section.source-apify-dataset.datasetId.title=Dataset ID
+datasources.section.source-apify-dataset.clean.description=If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false.
+datasources.section.source-apify-dataset.datasetId.description=ID of the dataset you would like to load to harana.
+datasources.section.source-appsflyer.api_token.description=Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard.
+datasources.section.source-appsflyer.app_id.description=App identifier as found in AppsFlyer.
+datasources.section.source-appsflyer.start_date.description=The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days.
+datasources.section.source-appsflyer.timezone.description=Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console.
+datasources.section.source-appstore-singer.issuer_id.title=Issuer ID
+datasources.section.source-appstore-singer.key_id.title=Key ID
+datasources.section.source-appstore-singer.private_key.title=Private Key
+datasources.section.source-appstore-singer.start_date.title=Start Date
+datasources.section.source-appstore-singer.vendor.title=Vendor ID
+datasources.section.source-appstore-singer.issuer_id.description=Appstore Issuer ID. See the docs for more information on how to obtain this ID.
+datasources.section.source-appstore-singer.key_id.description=Appstore Key ID. See the docs for more information on how to obtain this key.
+datasources.section.source-appstore-singer.private_key.description=Appstore Private Key. See the docs for more information on how to obtain this key.
+datasources.section.source-appstore-singer.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-appstore-singer.vendor.description=Appstore Vendor ID. See the docs for more information on how to obtain this ID.
+datasources.section.source-asana.credentials.oneOf.0.properties.option_title.title=Credentials title
+datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.title=Personal Access Token
+datasources.section.source-asana.credentials.oneOf.0.title=Authenticate with Personal Access Token
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title=
+datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title=
+datasources.section.source-asana.credentials.oneOf.1.properties.option_title.title=Credentials title
+datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title=
+datasources.section.source-asana.credentials.oneOf.1.title=Authenticate via Asana (OAuth)
+datasources.section.source-asana.credentials.title=Authentication mechanism
+datasources.section.source-asana.credentials.description=Choose how to authenticate to Asana
+datasources.section.source-asana.credentials.oneOf.0.properties.option_title.description=PAT Credentials
+datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.description=Asana Personal Access Token (generate yours here).
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description=
+datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description=
+datasources.section.source-asana.credentials.oneOf.1.properties.option_title.description=OAuth Credentials
+datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description=
+datasources.section.source-aws-cloudtrail.aws_key_id.title=Key ID
+datasources.section.source-aws-cloudtrail.aws_region_name.title=Region Name
+datasources.section.source-aws-cloudtrail.aws_secret_key.title=Secret Key
+datasources.section.source-aws-cloudtrail.start_date.title=Start Date
+datasources.section.source-aws-cloudtrail.aws_key_id.description=AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key.
+datasources.section.source-aws-cloudtrail.aws_region_name.description=The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name.
+datasources.section.source-aws-cloudtrail.aws_secret_key.description=AWS CloudTrail Secret Key. See the docs for more information on how to obtain this key.
+datasources.section.source-aws-cloudtrail.start_date.description=The date you would like to replicate data. Data in AWS CloudTrail is available for the last 90 days only. Format: YYYY-MM-DD.
+datasources.section.source-azure-table.storage_access_key.title=Access Key
+datasources.section.source-azure-table.storage_account_name.title=Account Name
+datasources.section.source-azure-table.storage_endpoint_suffix.title=Endpoint Suffix
+datasources.section.source-azure-table.storage_access_key.description=Azure Table Storage Access Key. See the docs for more information on how to obtain this key.
+datasources.section.source-azure-table.storage_account_name.description=The name of your storage account.
+datasources.section.source-azure-table.storage_endpoint_suffix.description=Azure Table Storage service account URL suffix. See the docs for more information on how to obtain the endpoint suffix.
+datasources.section.source-bamboo-hr.api_key.description=API key of BambooHR.
+datasources.section.source-bamboo-hr.custom_reports_fields.description=Comma-separated list of fields to include in custom reports.
+datasources.section.source-bamboo-hr.custom_reports_include_default_fields.description=If true, the custom reports endpoint will include the default fields defined here: https://documentation.bamboohr.com/docs/list-of-field-names.
+datasources.section.source-bamboo-hr.subdomain.description=Subdomain of BambooHR.
+datasources.section.source-bigcommerce.access_token.title=Access Token
+datasources.section.source-bigcommerce.start_date.title=Start Date
+datasources.section.source-bigcommerce.store_hash.title=Store Hash
+datasources.section.source-bigcommerce.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-bigcommerce.start_date.description=The date you would like to replicate data. Format: YYYY-MM-DD.
+datasources.section.source-bigcommerce.store_hash.description=The hash code of the store. For https://api.bigcommerce.com/stores/HASH_CODE/v3/, the store's hash code is 'HASH_CODE'.
+datasources.section.source-bigquery.credentials_json.title=Credentials JSON +datasources.section.source-bigquery.dataset_id.title=Default Dataset ID +datasources.section.source-bigquery.project_id.title=Project ID +datasources.section.source-bigquery.credentials_json.description=The contents of your Service Account Key JSON file. See the docs for more information on how to obtain this key. +datasources.section.source-bigquery.dataset_id.description=The dataset ID to search for tables and views. If you are only loading data from one dataset, setting this option could result in much faster schema discovery. +datasources.section.source-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. +datasources.section.source-bing-ads.client_id.title=Client ID +datasources.section.source-bing-ads.client_secret.title=Client Secret +datasources.section.source-bing-ads.developer_token.title=Developer Token +datasources.section.source-bing-ads.refresh_token.title=Refresh Token +datasources.section.source-bing-ads.reports_start_date.title=Reports replication start date +datasources.section.source-bing-ads.tenant_id.title=Tenant ID +datasources.section.source-bing-ads.client_id.description=The Client ID of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.client_secret.description=The Client Secret of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.developer_token.description=Developer token associated with user. See more info in the docs. +datasources.section.source-bing-ads.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-bing-ads.reports_start_date.description=The start date from which to begin replicating report data. Any data generated before this date will not be replicated in reports. This is a UTC date in YYYY-MM-DD format. +datasources.section.source-bing-ads.tenant_id.description=The Tenant ID of your Microsoft Advertising developer application. Set this to "common" unless you know you need a different value. +datasources.section.source-braintree.environment.title=Environment +datasources.section.source-braintree.merchant_id.title=Merchant ID +datasources.section.source-braintree.private_key.title=Private Key +datasources.section.source-braintree.public_key.title=Public Key +datasources.section.source-braintree.start_date.title=Start Date +datasources.section.source-braintree.environment.description=Environment specifies where the data will come from. +datasources.section.source-braintree.merchant_id.description=The unique identifier for your entire gateway account. See the docs for more information on how to obtain this ID. +datasources.section.source-braintree.private_key.description=Braintree Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.public_key.description=Braintree Public Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-cart.access_token.title=Access Token +datasources.section.source-cart.start_date.title=Start Date +datasources.section.source-cart.store_name.title=Store Name +datasources.section.source-cart.access_token.description=Access Token for making authenticated requests. 
+datasources.section.source-cart.start_date.description=The date from which you'd like to replicate the data +datasources.section.source-cart.store_name.description=The name of Cart.com Online Store. All API URLs start with https://[mystorename.com]/api/v1/, where [mystorename.com] is the domain name of your store. +datasources.section.source-chargebee.product_catalog.title=Product Catalog +datasources.section.source-chargebee.site.title=Site +datasources.section.source-chargebee.site_api_key.title=API Key +datasources.section.source-chargebee.start_date.title=Start Date +datasources.section.source-chargebee.product_catalog.description=Product Catalog version of your Chargebee site. Instructions on how to find your version you may find here under `API Version` section. +datasources.section.source-chargebee.site.description=The site prefix for your Chargebee instance. +datasources.section.source-chargebee.site_api_key.description=Chargebee API Key. See the docs for more information on how to obtain this key. +datasources.section.source-chargebee.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-chargify.api_key.description=Chargify API Key. +datasources.section.source-chargify.domain.description=Chargify domain. Normally this domain follows the following format companyname.chargify.com +datasources.section.source-chartmogul.api_key.description=Chartmogul API key +datasources.section.source-chartmogul.interval.description=Some APIs such as Metrics require intervals to cluster data. +datasources.section.source-chartmogul.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. When feasible, any data before this date will not be replicated. +datasources.section.source-clickhouse.database.title=Database +datasources.section.source-clickhouse.host.title=Host +datasources.section.source-clickhouse.password.title=Password +datasources.section.source-clickhouse.port.title=Port +datasources.section.source-clickhouse.ssl.title=SSL Connection +datasources.section.source-clickhouse.username.title=Username +datasources.section.source-clickhouse.database.description=The name of the database. +datasources.section.source-clickhouse.host.description=The host endpoint of the Clickhouse cluster. +datasources.section.source-clickhouse.password.description=The password associated with this username. +datasources.section.source-clickhouse.port.description=The port of the database. +datasources.section.source-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.source-clickhouse.username.description=The username which is used to access the database. +datasources.section.source-close-com.api_key.description=Close.com API key (usually starts with 'api_'; find yours here). +datasources.section.source-close-com.start_date.description=The start date to sync data. Leave blank for full sync. Format: YYYY-MM-DD. +datasources.section.source-cockroachdb.database.title=DB Name +datasources.section.source-cockroachdb.host.title=Host +datasources.section.source-cockroachdb.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-cockroachdb.password.title=Password +datasources.section.source-cockroachdb.port.title=Port +datasources.section.source-cockroachdb.ssl.title=Connect using SSL +datasources.section.source-cockroachdb.username.title=User +datasources.section.source-cockroachdb.database.description=Name of the database. 
+datasources.section.source-cockroachdb.host.description=Hostname of the database.
+datasources.section.source-cockroachdb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.
+datasources.section.source-cockroachdb.password.description=Password associated with the username.
+datasources.section.source-cockroachdb.port.description=Port of the database.
+datasources.section.source-cockroachdb.ssl.description=Encrypt client/server communications for increased security.
+datasources.section.source-cockroachdb.username.description=Username to use to access the database.
+datasources.section.source-commercetools.client_id.description=ID of the API Client.
+datasources.section.source-commercetools.client_secret.description=The password/secret of the API Client.
+datasources.section.source-commercetools.host.description=The cloud provider where your shop is hosted. See: https://docs.commercetools.com/api/authorization
+datasources.section.source-commercetools.project_key.description=The project key
+datasources.section.source-commercetools.region.description=The region of the platform.
+datasources.section.source-commercetools.start_date.description=The date you would like to replicate data. Format: YYYY-MM-DD.
+datasources.section.source-confluence.api_token.description=Please follow the Confluence documentation for generating an API token: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/
+datasources.section.source-confluence.domain_name.description=Your Confluence domain name
+datasources.section.source-confluence.email.description=Your Confluence login email
+datasources.section.source-db2.encryption.oneOf.0.title=Unencrypted
+datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.title=Key Store Password. This field is optional. If you do not fill in this field, the password will be randomly generated.
+datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.title=SSL PEM file
+datasources.section.source-db2.encryption.oneOf.1.title=TLS Encrypted (verify certificate)
+datasources.section.source-db2.encryption.title=Encryption
+datasources.section.source-db2.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-db2.db.description=Name of the database.
+datasources.section.source-db2.encryption.description=Encryption method to use when communicating with the database
+datasources.section.source-db2.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.source-db2.encryption.oneOf.1.description=Verify and use the cert provided by the server.
+datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.description=Key Store Password
+datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations
+datasources.section.source-db2.host.description=Host of the Db2.
+datasources.section.source-db2.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-db2.password.description=Password associated with the username.
+datasources.section.source-db2.port.description=Port of the database. +datasources.section.source-db2.username.description=Username to use to access the database. +datasources.section.source-delighted.api_key.title=Delighted API Key +datasources.section.source-delighted.since.title=Since +datasources.section.source-delighted.api_key.description=A Delighted API key. +datasources.section.source-delighted.since.description=The date from which you'd like to replicate the data +datasources.section.source-dixa.api_token.description=Dixa API token +datasources.section.source-dixa.batch_size.description=Number of days to batch into one request. Max 31. +datasources.section.source-dixa.start_date.description=The connector pulls records updated from this date onwards. +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-drift.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.1.title=Access Token +datasources.section.source-drift.credentials.title=Authorization Method +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.description=The Client ID of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.description=Drift Access Token. See the docs for more information on how to generate this key. 
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.title=None
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password
+datasources.section.source-elasticsearch.authenticationMethod.title=Authentication Method
+datasources.section.source-elasticsearch.endpoint.title=Server Endpoint
+datasources.section.source-elasticsearch.authenticationMethod.description=The type of authentication to be used
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to be used when accessing an enterprise Elasticsearch instance.
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID.
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server
+datasources.section.source-elasticsearch.endpoint.description=The full URL of the Elasticsearch server
+datasources.section.source-facebook-marketing.access_token.title=Access Token
+datasources.section.source-facebook-marketing.account_id.title=Account ID
+datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.title=ValidActionBreakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.title=Action Breakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.title=ValidBreakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.title=Breakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.title=End Date
+datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.title=ValidEnums
+datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.title=Fields
+datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.title=Custom Insights Lookback Window
+datasources.section.source-facebook-marketing.custom_insights.items.properties.name.title=Name
+datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.title=Start Date
+datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.title=Time Increment
+datasources.section.source-facebook-marketing.custom_insights.items.title=InsightConfig
+datasources.section.source-facebook-marketing.custom_insights.title=Custom Insights
+datasources.section.source-facebook-marketing.end_date.title=End Date
+datasources.section.source-facebook-marketing.fetch_thumbnail_images.title=Fetch Thumbnail Images
+datasources.section.source-facebook-marketing.include_deleted.title=Include Deleted
+datasources.section.source-facebook-marketing.insights_lookback_window.title=Insights Lookback Window
+datasources.section.source-facebook-marketing.max_batch_size.title=Maximum size of Batched Requests
+datasources.section.source-facebook-marketing.page_size.title=Page Size of Requests
+datasources.section.source-facebook-marketing.start_date.title=Start Date
+datasources.section.source-facebook-marketing.access_token.description=The value of the access token generated. See the docs for more information
+datasources.section.source-facebook-marketing.account_id.description=The Facebook Ad account ID to use when pulling data from the Facebook Marketing API.
+datasources.section.source-facebook-marketing.custom_insights.description=A list which contains insights entries; each entry must have a name and can contain fields, breakdowns or action_breakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.description=Config for custom insights
+datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.description=A list of chosen action_breakdowns for action_breakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.description=Generic enumeration.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.description=A list of chosen breakdowns for breakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.description=Generic enumeration.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.description=The date until which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. All data generated between the start date and this date will be replicated. Not setting this option will result in always syncing the latest data.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.description=A list of chosen fields for fields parameter
+datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.description=Generic enumeration.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.description=The attribution window
+datasources.section.source-facebook-marketing.custom_insights.items.properties.name.description=The name value of insight
+datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.description=The date from which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.description=Time window in days by which to aggregate statistics. The sync will be chunked into N day intervals, where N is the number of days you specified. For example, if you set this value to 7, then all statistics will be reported as 7-day aggregates by starting from the start_date.
+datasources.section.source-facebook-marketing.end_date.description=The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the latest data.
+datasources.section.source-facebook-marketing.fetch_thumbnail_images.description=In each Ad Creative, fetch the thumbnail_url and store the result in thumbnail_data_url
+datasources.section.source-facebook-marketing.include_deleted.description=Include data from deleted Campaigns, Ads, and AdSets
+datasources.section.source-facebook-marketing.insights_lookback_window.description=The attribution window
+datasources.section.source-facebook-marketing.max_batch_size.description=Maximum batch size used when sending batch requests to Facebook API. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases.
+datasources.section.source-facebook-marketing.page_size.description=Page size used when sending requests to Facebook API to specify number of records per page when response has pagination. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases.
+datasources.section.source-facebook-marketing.start_date.description=The date from which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.
+datasources.section.source-facebook-pages.access_token.title=Page Access Token
+datasources.section.source-facebook-pages.page_id.title=Page ID
+datasources.section.source-facebook-pages.access_token.description=Facebook Page Access Token
+datasources.section.source-facebook-pages.page_id.description=Page ID
+datasources.section.source-faker.count.title=Count
+datasources.section.source-faker.records_per_slice.title=Records Per Stream Slice
+datasources.section.source-faker.records_per_sync.title=Records Per Sync
+datasources.section.source-faker.seed.title=Seed
+datasources.section.source-faker.count.description=How many users should be generated in total. This setting does not apply to the purchases or products stream.
+datasources.section.source-faker.records_per_slice.description=How many fake records will be in each page (stream slice), before a state message is emitted?
+datasources.section.source-faker.records_per_sync.description=How many fake records will be returned for each sync, for each stream? By default, it will take 2 syncs to create the requested 1000 records.
+datasources.section.source-faker.seed.description=Manually control the faker random seed to return the same values on subsequent runs (leave -1 for random)
+datasources.section.source-file.dataset_name.title=Dataset Name
+datasources.section.source-file.format.title=File Format
+datasources.section.source-file.provider.oneOf.0.properties.user_agent.title=User-Agent
+datasources.section.source-file.provider.oneOf.0.title=HTTPS: Public Web
+datasources.section.source-file.provider.oneOf.1.properties.service_account_json.title=Service Account JSON
+datasources.section.source-file.provider.oneOf.1.properties.storage.title=Storage
+datasources.section.source-file.provider.oneOf.1.title=GCS: Google Cloud Storage
+datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID
+datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key
+datasources.section.source-file.provider.oneOf.2.properties.storage.title=Storage
+datasources.section.source-file.provider.oneOf.2.title=S3: Amazon Web Services
+datasources.section.source-file.provider.oneOf.3.properties.sas_token.title=SAS Token
+datasources.section.source-file.provider.oneOf.3.properties.shared_key.title=Shared Key
+datasources.section.source-file.provider.oneOf.3.properties.storage.title=Storage
+datasources.section.source-file.provider.oneOf.3.properties.storage_account.title=Storage Account
+datasources.section.source-file.provider.oneOf.3.title=AzBlob: Azure Blob Storage
+datasources.section.source-file.provider.oneOf.4.properties.host.title=Host
+datasources.section.source-file.provider.oneOf.4.properties.password.title=Password
+datasources.section.source-file.provider.oneOf.4.properties.port.title=Port
+datasources.section.source-file.provider.oneOf.4.properties.storage.title=Storage
+datasources.section.source-file.provider.oneOf.4.properties.user.title=User
+datasources.section.source-file.provider.oneOf.4.title=SSH: Secure Shell
+datasources.section.source-file.provider.oneOf.5.properties.host.title=Host
+datasources.section.source-file.provider.oneOf.5.properties.password.title=Password
+datasources.section.source-file.provider.oneOf.5.properties.port.title=Port
+datasources.section.source-file.provider.oneOf.5.properties.storage.title=Storage
+datasources.section.source-file.provider.oneOf.5.properties.user.title=User
+datasources.section.source-file.provider.oneOf.5.title=SCP: Secure Copy Protocol
+datasources.section.source-file.provider.oneOf.6.properties.host.title=Host
+datasources.section.source-file.provider.oneOf.6.properties.password.title=Password
+datasources.section.source-file.provider.oneOf.6.properties.port.title=Port
+datasources.section.source-file.provider.oneOf.6.properties.storage.title=Storage
+datasources.section.source-file.provider.oneOf.6.properties.user.title=User
+datasources.section.source-file.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol
+datasources.section.source-file.provider.oneOf.7.properties.storage.title=Storage
+datasources.section.source-file.provider.oneOf.7.title=Local Filesystem (limited)
+datasources.section.source-file.provider.title=Storage Provider
+datasources.section.source-file.reader_options.title=Reader Options
+datasources.section.source-file.url.title=URL
+datasources.section.source-file.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers, dashes and underscores only).
+datasources.section.source-file.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs).
+datasources.section.source-file.provider.description=The storage Provider or Location of the file(s) which should be replicated.
+datasources.section.source-file.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request
+datasources.section.source-file.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector needs service account JSON credentials with the proper permissions, as described here. Please generate the credentials.json file and copy/paste its content into this field (JSON format expected). If accessing publicly available data, this field is not necessary.
+datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary.
+datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary.
+datasources.section.source-file.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary.
+datasources.section.source-file.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary.
+datasources.section.source-file.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details.
+datasources.section.source-file.provider.oneOf.4.properties.host.description=
+datasources.section.source-file.provider.oneOf.4.properties.password.description=
+datasources.section.source-file.provider.oneOf.4.properties.port.description=
+datasources.section.source-file.provider.oneOf.4.properties.user.description=
+datasources.section.source-file.provider.oneOf.5.properties.host.description=
+datasources.section.source-file.provider.oneOf.5.properties.password.description=
+datasources.section.source-file.provider.oneOf.5.properties.port.description=
+datasources.section.source-file.provider.oneOf.5.properties.user.description=
+datasources.section.source-file.provider.oneOf.6.properties.host.description=
+datasources.section.source-file.provider.oneOf.6.properties.password.description=
+datasources.section.source-file.provider.oneOf.6.properties.port.description=
+datasources.section.source-file.provider.oneOf.6.properties.user.description=
+datasources.section.source-file.provider.oneOf.7.properties.storage.description=WARNING: Note that the local storage URL available for reading must start with the local mount "/local/" at the moment until we implement more advanced docker mounting options.
+datasources.section.source-file.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior.
+datasources.section.source-file.url.description=The URL path to access the file which should be replicated.
+datasources.section.source-file-secure.dataset_name.title=Dataset Name
+datasources.section.source-file-secure.format.title=File Format
+datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.title=User-Agent
+datasources.section.source-file-secure.provider.oneOf.0.title=HTTPS: Public Web
+datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.title=Service Account JSON
+datasources.section.source-file-secure.provider.oneOf.1.properties.storage.title=Storage
+datasources.section.source-file-secure.provider.oneOf.1.title=GCS: Google Cloud Storage
+datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID
+datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key
+datasources.section.source-file-secure.provider.oneOf.2.properties.storage.title=Storage
+datasources.section.source-file-secure.provider.oneOf.2.title=S3: Amazon Web Services
+datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.title=SAS Token
+datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.title=Shared Key
+datasources.section.source-file-secure.provider.oneOf.3.properties.storage.title=Storage
+datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.title=Storage Account
+datasources.section.source-file-secure.provider.oneOf.3.title=AzBlob: Azure Blob Storage
+datasources.section.source-file-secure.provider.oneOf.4.properties.host.title=Host
+datasources.section.source-file-secure.provider.oneOf.4.properties.password.title=Password
+datasources.section.source-file-secure.provider.oneOf.4.properties.port.title=Port
+datasources.section.source-file-secure.provider.oneOf.4.properties.storage.title=Storage
+datasources.section.source-file-secure.provider.oneOf.4.properties.user.title=User
+datasources.section.source-file-secure.provider.oneOf.4.title=SSH: Secure Shell
+datasources.section.source-file-secure.provider.oneOf.5.properties.host.title=Host
+datasources.section.source-file-secure.provider.oneOf.5.properties.password.title=Password
+datasources.section.source-file-secure.provider.oneOf.5.properties.port.title=Port
+datasources.section.source-file-secure.provider.oneOf.5.properties.storage.title=Storage
+datasources.section.source-file-secure.provider.oneOf.5.properties.user.title=User
+datasources.section.source-file-secure.provider.oneOf.5.title=SCP: Secure Copy Protocol
+datasources.section.source-file-secure.provider.oneOf.6.properties.host.title=Host
+datasources.section.source-file-secure.provider.oneOf.6.properties.password.title=Password
+datasources.section.source-file-secure.provider.oneOf.6.properties.port.title=Port
+datasources.section.source-file-secure.provider.oneOf.6.properties.storage.title=Storage
+datasources.section.source-file-secure.provider.oneOf.6.properties.user.title=User
+datasources.section.source-file-secure.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol
+datasources.section.source-file-secure.provider.title=Storage Provider
+datasources.section.source-file-secure.reader_options.title=Reader Options
+datasources.section.source-file-secure.url.title=URL
+datasources.section.source-file-secure.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers, dashes and underscores only).
+datasources.section.source-file-secure.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs).
+datasources.section.source-file-secure.provider.description=The storage Provider or Location of the file(s) which should be replicated.
+datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request
+datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector needs service account JSON credentials with the proper permissions, as described here. Please generate the credentials.json file and copy/paste its content into this field (JSON format expected). If accessing publicly available data, this field is not necessary.
+datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary.
+datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary.
+datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary.
+datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary.
+datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details.
+datasources.section.source-file-secure.provider.oneOf.4.properties.host.description=
+datasources.section.source-file-secure.provider.oneOf.4.properties.password.description=
+datasources.section.source-file-secure.provider.oneOf.4.properties.port.description=
+datasources.section.source-file-secure.provider.oneOf.4.properties.user.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.host.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.password.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.port.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.user.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.host.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.password.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.port.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.user.description=
+datasources.section.source-file-secure.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior.
+datasources.section.source-file-secure.url.description=The URL path to access the file which should be replicated.
+datasources.section.source-firebolt.account.title=Account
+datasources.section.source-firebolt.database.title=Database
+datasources.section.source-firebolt.engine.title=Engine
+datasources.section.source-firebolt.host.title=Host
+datasources.section.source-firebolt.password.title=Password
+datasources.section.source-firebolt.username.title=Username
+datasources.section.source-firebolt.account.description=Firebolt account to log in to.
+datasources.section.source-firebolt.database.description=The database to connect to.
+datasources.section.source-firebolt.engine.description=Engine name or URL to connect to.
+datasources.section.source-firebolt.host.description=The host name of your Firebolt database.
+datasources.section.source-firebolt.password.description=Firebolt password.
+datasources.section.source-firebolt.username.description=Firebolt email address you use to log in.
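Every entry in this bundle follows the same key shape: datasources.section.<connector>.<path into the connector spec>.title or .description. As a rough illustration of how such a bundle could be looked up from JVM code, here is a minimal Scala sketch; the resource name and object name are assumptions for illustration only and do not correspond to code that necessarily exists in this repository.

```
import java.util.Properties

// Minimal sketch only. The resource name below is an assumption; the real
// bundle file in the repository may be named differently.
object DatasourceLabels {
  private val props: Properties = {
    val p = new Properties()
    val in = getClass.getResourceAsStream("/airbyte/datasources.properties")
    if (in != null) try p.load(in) finally in.close()
    p
  }

  // Keys follow: datasources.section.<connector>.<spec path>.(title|description)
  def title(connector: String, path: String): Option[String] =
    Option(props.getProperty(s"datasources.section.$connector.$path.title"))

  def description(connector: String, path: String): Option[String] =
    Option(props.getProperty(s"datasources.section.$connector.$path.description"))
}

// e.g. DatasourceLabels.title("source-firebolt", "engine") would yield Some("Engine")
```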
+datasources.section.source-flexport.api_key.title=API Key
+datasources.section.source-flexport.start_date.title=Start Date
+datasources.section.source-freshcaller.api_key.title=API Key
+datasources.section.source-freshcaller.domain.title=Domain for Freshcaller account
+datasources.section.source-freshcaller.requests_per_minute.title=Requests per minute
+datasources.section.source-freshcaller.start_date.title=Start Date
+datasources.section.source-freshcaller.sync_lag_minutes.title=Lag in minutes for each sync
+datasources.section.source-freshcaller.api_key.description=Freshcaller API Key. See the docs for more information on how to obtain this key.
+datasources.section.source-freshcaller.domain.description=Used to construct the base URL for the Freshcaller APIs
+datasources.section.source-freshcaller.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account.
+datasources.section.source-freshcaller.start_date.description=UTC date and time. Any data created after this date will be replicated.
+datasources.section.source-freshcaller.sync_lag_minutes.description=Lag in minutes for each sync, i.e., at time T, data for the time range [prev_sync_time, T-30] will be fetched
+datasources.section.source-freshdesk.api_key.title=API Key
+datasources.section.source-freshdesk.domain.title=Domain
+datasources.section.source-freshdesk.requests_per_minute.title=Requests per minute
+datasources.section.source-freshdesk.start_date.title=Start Date
+datasources.section.source-freshdesk.api_key.description=Freshdesk API Key. See the docs for more information on how to obtain this key.
+datasources.section.source-freshdesk.domain.description=Freshdesk domain
+datasources.section.source-freshdesk.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account.
+datasources.section.source-freshdesk.start_date.description=UTC date and time. Any data created after this date will be replicated. If this parameter is not set, all data will be replicated.
+datasources.section.source-freshsales.api_key.title=API Key
+datasources.section.source-freshsales.domain_name.title=Domain Name
+datasources.section.source-freshsales.api_key.description=Freshsales API Key. See here. The key is case sensitive.
+datasources.section.source-freshsales.domain_name.description=The Name of your Freshsales domain
+datasources.section.source-freshservice.api_key.title=API Key
+datasources.section.source-freshservice.domain_name.title=Domain Name
+datasources.section.source-freshservice.start_date.title=Start Date
+datasources.section.source-freshservice.api_key.description=Freshservice API Key. See here. The key is case sensitive.
+datasources.section.source-freshservice.domain_name.description=The name of your Freshservice domain
+datasources.section.source-freshservice.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated.
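The Freshcaller and Freshdesk "Requests per minute" settings above are plain client-side throttles against the stated 50 requests/minute budget. The sketch below is illustrative only (not connector code, and the function name is made up): it simply spaces calls evenly to stay under such a budget.

```
// Illustrative only: space calls evenly to stay under a requests-per-minute budget.
def paced[A](requestsPerMinute: Int)(calls: List[() => A]): List[A] = {
  val delayMs = (60000.0 / requestsPerMinute).toLong
  calls.map { call =>
    val result = call()
    Thread.sleep(delayMs) // crude pacing; a real client might use a token bucket instead
    result
  }
}
```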
+datasources.section.source-github.branch.title=Branch
+datasources.section.source-github.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-github.credentials.oneOf.0.title=OAuth
+datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.title=Personal Access Tokens
+datasources.section.source-github.credentials.oneOf.1.title=Personal Access Token
+datasources.section.source-github.credentials.title=Authentication *
+datasources.section.source-github.page_size_for_large_streams.title=Page size for large streams
+datasources.section.source-github.repository.title=GitHub Repositories
+datasources.section.source-github.start_date.title=Start date
+datasources.section.source-github.branch.description=Space-delimited list of GitHub repository branches to pull commits for, e.g. `haranahq/harana/master`. If no branches are specified for a repository, the default branch will be pulled.
+datasources.section.source-github.credentials.description=Choose how to authenticate to GitHub
+datasources.section.source-github.credentials.oneOf.0.properties.access_token.description=OAuth access token
+datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.description=Log into GitHub and then generate a personal access token. To load balance your API quota consumption across multiple API tokens, input multiple tokens separated with ","
+datasources.section.source-github.page_size_for_large_streams.description=The GitHub connector contains several streams with a large amount of data. The page size of such streams depends on the size of your repository. We recommend that you specify values between 10 and 30.
+datasources.section.source-github.repository.description=Space-delimited list of GitHub organizations/repositories, e.g. `haranahq/harana` for a single repository, `haranahq/*` to get all repositories from an organization, and `haranahq/harana haranahq/another-repo` for multiple repositories.
+datasources.section.source-github.start_date.description=The date from which you'd like to replicate data from GitHub in the format YYYY-MM-DDT00:00:00Z. For the streams which support this configuration, only data generated on or after the start date will be replicated. This field doesn't apply to all streams, see the docs for more info
+datasources.section.source-gitlab.api_url.title=API URL
+datasources.section.source-gitlab.groups.title=Groups
+datasources.section.source-gitlab.private_token.title=Private Token
+datasources.section.source-gitlab.projects.title=Projects
+datasources.section.source-gitlab.start_date.title=Start Date
+datasources.section.source-gitlab.api_url.description=Please enter the base URL of your GitLab instance.
+datasources.section.source-gitlab.groups.description=Space-delimited list of groups. e.g. harana.com.
+datasources.section.source-gitlab.private_token.description=Log into your GitLab account and then generate a personal access token.
+datasources.section.source-gitlab.projects.description=Space-delimited list of projects. e.g. harana.com/documentation meltano/tap-gitlab.
+datasources.section.source-gitlab.start_date.description=The date from which you'd like to replicate data for GitLab API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.
+datasources.section.source-google-ads.conversion_window_days.title=Conversion Window
+datasources.section.source-google-ads.credentials.properties.access_token.title=Access Token
+datasources.section.source-google-ads.credentials.properties.client_id.title=Client ID
+datasources.section.source-google-ads.credentials.properties.client_secret.title=Client Secret
+datasources.section.source-google-ads.credentials.properties.developer_token.title=Developer Token
+datasources.section.source-google-ads.credentials.properties.refresh_token.title=Refresh Token
+datasources.section.source-google-ads.credentials.title=Google Credentials
+datasources.section.source-google-ads.custom_queries.items.properties.query.title=Custom Query
+datasources.section.source-google-ads.custom_queries.items.properties.table_name.title=Destination Table Name
+datasources.section.source-google-ads.custom_queries.title=Custom GAQL Queries
+datasources.section.source-google-ads.customer_id.title=Customer ID(s)
+datasources.section.source-google-ads.end_date.title=End Date
+datasources.section.source-google-ads.login_customer_id.title=Login Customer ID for Managed Accounts
+datasources.section.source-google-ads.start_date.title=Start Date
+datasources.section.source-google-ads.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. For more information, see Google's documentation.
+datasources.section.source-google-ads.credentials.description=
+datasources.section.source-google-ads.credentials.properties.access_token.description=Access Token for making authenticated requests. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.credentials.properties.client_id.description=The Client ID of your Google Ads developer application. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.credentials.properties.client_secret.description=The Client Secret of your Google Ads developer application. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.credentials.properties.developer_token.description=Developer token granted by Google to use their APIs. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.credentials.properties.refresh_token.description=The token for obtaining a new access token. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.custom_queries.description=
+datasources.section.source-google-ads.custom_queries.items.properties.query.description=A custom defined GAQL query for building the report. Should not contain segments.date expression because it is used by incremental streams. See Google's query builder for more information.
+datasources.section.source-google-ads.custom_queries.items.properties.table_name.description=The table name in your destination database for the chosen query.
+datasources.section.source-google-ads.customer_id.description=Comma-separated list of (client) customer IDs. Each customer ID must be specified as a 10-digit number without dashes. More instruction on how to find this value in our docs. Metrics streams like AdGroupAdReport cannot be requested for a manager account.
+datasources.section.source-google-ads.end_date.description=UTC date in the format 2017-01-25. Any data after this date will not be replicated.
+datasources.section.source-google-ads.login_customer_id.description=If your access to the customer account is through a manager account, this field is required and must be set to the customer ID of the manager account (10-digit number without dashes). More information about this field is available here
+datasources.section.source-google-ads.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated.
+datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-google-analytics-data-api.credentials.oneOf.0.title=Authenticate via Google (OAuth)
+datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key
+datasources.section.source-google-analytics-data-api.credentials.oneOf.1.title=Service Account Key Authentication
+datasources.section.source-google-analytics-data-api.credentials.title=Credentials
+datasources.section.source-google-analytics-data-api.custom_reports.title=Custom Reports
+datasources.section.source-google-analytics-data-api.date_ranges_start_date.title=Date Range Start Date
+datasources.section.source-google-analytics-data-api.property_id.title=Property ID
+datasources.section.source-google-analytics-data-api.window_in_days.title=Data request time increment in days
+datasources.section.source-google-analytics-data-api.credentials.description=Credentials for the service
+datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application.
+datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application.
+datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token.
+datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization
+datasources.section.source-google-analytics-data-api.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field.
+datasources.section.source-google-analytics-data-api.date_ranges_start_date.description=The start date. One of the values Ndaysago, yesterday, today or in the format YYYY-MM-DD
+datasources.section.source-google-analytics-data-api.property_id.description=A Google Analytics GA4 property identifier whose events are tracked. Specified in the URL path and not the body
+datasources.section.source-google-analytics-data-api.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364.
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.title=Authenticate via Google (OAuth)
+datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key
+datasources.section.source-google-analytics-v4.credentials.oneOf.1.title=Service Account Key Authentication
+datasources.section.source-google-analytics-v4.credentials.title=Credentials
+datasources.section.source-google-analytics-v4.custom_reports.title=Custom Reports
+datasources.section.source-google-analytics-v4.start_date.title=Replication Start Date
+datasources.section.source-google-analytics-v4.view_id.title=View ID
+datasources.section.source-google-analytics-v4.window_in_days.title=Data request time increment in days
+datasources.section.source-google-analytics-v4.credentials.description=Credentials for the service
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application.
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application.
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token.
+datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization
+datasources.section.source-google-analytics-v4.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field.
+datasources.section.source-google-analytics-v4.start_date.description=The date in the format YYYY-MM-DD. Any data before this date will not be replicated.
+datasources.section.source-google-analytics-v4.view_id.description=The ID for the Google Analytics View you want to fetch data from. This can be found from the Google Analytics Account Explorer.
+datasources.section.source-google-analytics-v4.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-google-directory.credentials.oneOf.0.title=Sign in via Google (OAuth)
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.title=Credentials JSON
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.source-google-directory.credentials.oneOf.1.properties.email.title=Email
+datasources.section.source-google-directory.credentials.oneOf.1.title=Service Account Key
+datasources.section.source-google-directory.credentials.title=Google Credentials
+datasources.section.source-google-directory.credentials.description=Google APIs use the OAuth 2.0 protocol for authentication and authorization. The Source supports Web server application and Service accounts scenarios.
+datasources.section.source-google-directory.credentials.oneOf.0.description=For this scenario, the user only needs to give permission to read Google Directory data.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.description=The Client ID of the developer application.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the developer application.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.description=Authentication Scenario
+datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.description=The Token for obtaining a new access token.
+datasources.section.source-google-directory.credentials.oneOf.1.description=For this scenario, the user should obtain the service account's credentials from the Google API Console and provide the delegated email.
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key.
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.description=Authentication Scenario
+datasources.section.source-google-directory.credentials.oneOf.1.properties.email.description=The email of the user, who has permissions to access the Google Workspace Admin APIs.
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-google-search-console.authorization.oneOf.0.title=OAuth
+datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.title=Admin Email
+datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.title=Service Account JSON Key
+datasources.section.source-google-search-console.authorization.oneOf.1.title=Service Account Key Authentication
+datasources.section.source-google-search-console.authorization.title=Authentication Type
+datasources.section.source-google-search-console.end_date.title=End Date
+datasources.section.source-google-search-console.site_urls.title=Website URL Property
+datasources.section.source-google-search-console.start_date.title=Start Date
+datasources.section.source-google-search-console.authorization.description=
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.description=Access token for making authenticated requests. Read more here.
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.description=The client ID of your Google Search Console developer application. Read more here.
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.description=The client secret of your Google Search Console developer application. Read more here.
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. Read more here.
+datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs.
+datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.description=The JSON key of the service account to use for authorization. Read more here.
+datasources.section.source-google-search-console.end_date.description=UTC date in the format 2017-01-25. Any data after this date will not be replicated. Must be greater than or equal to the start date field.
+datasources.section.source-google-search-console.site_urls.description=The URLs of the website property attached to your GSC account. Read more here.
+datasources.section.source-google-search-console.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated.
+datasources.section.source-google-workspace-admin-reports.credentials_json.title=Credentials JSON
+datasources.section.source-google-workspace-admin-reports.email.title=Email
+datasources.section.source-google-workspace-admin-reports.lookback.title=Lookback Window in Days
+datasources.section.source-google-workspace-admin-reports.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key.
+datasources.section.source-google-workspace-admin-reports.email.description=The email of the user, who has permissions to access the Google Workspace Admin APIs.
+datasources.section.source-google-workspace-admin-reports.lookback.description=Sets the range of time shown in the report. The maximum value allowed by the Google API is 180 days.
+datasources.section.source-greenhouse.api_key.title=API Key
+datasources.section.source-greenhouse.api_key.description=Greenhouse API Key. See the docs for more information on how to generate this key.
+datasources.section.source-harvest.account_id.title=Account ID
+datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-harvest.credentials.oneOf.0.title=Authenticate via Harvest (OAuth)
+datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.title=Personal Access Token
+datasources.section.source-harvest.credentials.oneOf.1.title=Authenticate with Personal Access Token
+datasources.section.source-harvest.credentials.title=Authentication mechanism
+datasources.section.source-harvest.replication_start_date.title=Start Date
+datasources.section.source-harvest.account_id.description=Harvest account ID. Required for all Harvest requests, together with a Personal Access Token
+datasources.section.source-harvest.credentials.description=Choose how to authenticate to Harvest.
+datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.description=The Client ID of your Harvest developer application.
+datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Harvest developer application.
+datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token.
+datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.description=Log into Harvest and then create a new personal access token.
+datasources.section.source-harvest.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-hellobaton.api_key.description=Authentication key required to access the API endpoints
+datasources.section.source-hellobaton.company.description=Company name that generates your base API URL
+datasources.section.source-hubplanner.api_key.description=Hubplanner API key. See https://github.com/hubplanner/API#authentication for more details.
+datasources.section.source-instagram.access_token.title=Access Token
+datasources.section.source-instagram.start_date.title=Start Date
+datasources.section.source-instagram.access_token.description=The value of the access token generated. See the docs for more information
+datasources.section.source-instagram.start_date.description=The date from which you'd like to replicate data for User Insights, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.
+datasources.section.source-intercom.access_token.title=Access token
+datasources.section.source-intercom.start_date.title=Start date
+datasources.section.source-intercom.access_token.description=Access token for making authenticated requests. See the Intercom docs for more information.
+datasources.section.source-intercom.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-iterable.api_key.title=API Key
+datasources.section.source-iterable.start_date.title=Start Date
+datasources.section.source-iterable.api_key.description=Iterable API Key. See the docs for more information on how to obtain this key.
+datasources.section.source-iterable.start_date.description=The date from which you'd like to replicate data for Iterable, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.
+datasources.section.source-jdbc.jdbc_url.title=JDBC URL
+datasources.section.source-jdbc.password.title=Password
+datasources.section.source-jdbc.username.title=Username
+datasources.section.source-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here.
+datasources.section.source-jdbc.password.description=The password associated with this username.
+datasources.section.source-jdbc.username.description=The username which is used to access the database.
+datasources.section.source-jira.additional_fields.title=Additional Fields
+datasources.section.source-jira.api_token.title=API Token
+datasources.section.source-jira.domain.title=Domain
+datasources.section.source-jira.email.title=Email
+datasources.section.source-jira.enable_experimental_streams.title=Enable Experimental Streams
+datasources.section.source-jira.expand_issue_changelog.title=Expand Issue Changelog
+datasources.section.source-jira.projects.title=Projects
+datasources.section.source-jira.render_fields.title=Render Issue Fields
+datasources.section.source-jira.start_date.title=Start Date
+datasources.section.source-jira.additional_fields.description=List of additional fields to include in replicating issues.
+datasources.section.source-jira.api_token.description=Jira API Token. See the docs for more information on how to generate this key.
+datasources.section.source-jira.domain.description=The Domain for your Jira account, e.g. haranaio.atlassian.net
+datasources.section.source-jira.email.description=The user email for your Jira account.
+datasources.section.source-jira.enable_experimental_streams.description=Allow the use of experimental streams which rely on undocumented Jira API endpoints. See https://docs.harana.com/integrations/sources/jira#experimental-tables for more info.
+datasources.section.source-jira.expand_issue_changelog.description=Expand the changelog when replicating issues.
+datasources.section.source-jira.projects.description=List of Jira project keys to replicate data for.
+datasources.section.source-jira.render_fields.description=Render issue fields in HTML format in addition to Jira JSON-like format.
+datasources.section.source-jira.start_date.description=The date from which you'd like to replicate data for Jira in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only in the following incremental streams: issues.
+datasources.section.source-kafka.auto_commit_interval_ms.title=Auto Commit Interval, ms
+datasources.section.source-kafka.auto_offset_reset.title=Auto Offset Reset
+datasources.section.source-kafka.bootstrap_servers.title=Bootstrap Servers
+datasources.section.source-kafka.client_dns_lookup.title=Client DNS Lookup
+datasources.section.source-kafka.client_id.title=Client ID
+datasources.section.source-kafka.enable_auto_commit.title=Enable Auto Commit
+datasources.section.source-kafka.group_id.title=Group ID
+datasources.section.source-kafka.max_poll_records.title=Max Poll Records
+datasources.section.source-kafka.polling_time.title=Polling Time
+datasources.section.source-kafka.protocol.oneOf.0.title=PLAINTEXT
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism
+datasources.section.source-kafka.protocol.oneOf.1.title=SASL PLAINTEXT
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism
+datasources.section.source-kafka.protocol.oneOf.2.title=SASL SSL
+datasources.section.source-kafka.protocol.title=Protocol
+datasources.section.source-kafka.receive_buffer_bytes.title=Receive Buffer, bytes
+datasources.section.source-kafka.repeated_calls.title=Repeated Calls
+datasources.section.source-kafka.request_timeout_ms.title=Request Timeout, ms
+datasources.section.source-kafka.retry_backoff_ms.title=Retry Backoff, ms
+datasources.section.source-kafka.subscription.oneOf.0.properties.topic_partitions.title=List of topic:partition Pairs
+datasources.section.source-kafka.subscription.oneOf.0.title=Manually assign a list of partitions
+datasources.section.source-kafka.subscription.oneOf.1.properties.topic_pattern.title=Topic Pattern
+datasources.section.source-kafka.subscription.oneOf.1.title=Subscribe to all topics matching specified pattern
+datasources.section.source-kafka.subscription.title=Subscription Method
+datasources.section.source-kafka.test_topic.title=Test Topic
+datasources.section.source-kafka.auto_commit_interval_ms.description=The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true.
+datasources.section.source-kafka.auto_offset_reset.description=What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer.
+datasources.section.source-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background.
+datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups.
+datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and return them incrementally from each poll.
+datasources.section.source-kafka.polling_time.description=Amount of time the Kafka connector should try to poll for messages.
+datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received.
+datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching a specified pattern to get dynamically assigned partitions. +datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one). +datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read. +datasources.section.source-kafka.test_topic.description=The Topic used to test whether harana can consume messages. +datasources.section.source-klaviyo.api_key.title=API Key +datasources.section.source-klaviyo.start_date.title=Start Date +datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key. +datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-kustomer-singer.api_token.title=API Token +datasources.section.source-kustomer-singer.start_date.title=Start Date +datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this. +datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data. +datasources.section.source-kyriba.domain.title=Domain +datasources.section.source-kyriba.end_date.title=End Date +datasources.section.source-kyriba.password.title=Password +datasources.section.source-kyriba.start_date.title=Start Date +datasources.section.source-kyriba.username.title=Username +datasources.section.source-kyriba.domain.description=Kyriba domain +datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run to the current date. +datasources.section.source-kyriba.password.description=Password to be used in basic auth +datasources.section.source-kyriba.start_date.description=The date the sync should start from. +datasources.section.source-kyriba.username.description=Username to be used in basic auth +datasources.section.source-lemlist.api_key.title=API key +datasources.section.source-lemlist.api_key.description=Lemlist API key. +datasources.section.source-linkedin-ads.account_ids.title=Account IDs +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token +datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token +datasources.section.source-linkedin-ads.credentials.title=Authentication * +datasources.section.source-linkedin-ads.start_date.title=Start date +datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs, separated by a space, to pull the data from. Leave empty if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info. 
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours. +datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token +datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token +datasources.section.source-linkedin-pages.credentials.title=Authentication * +datasources.section.source-linkedin-pages.org_id.title=Organization ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID. +datasources.section.source-linnworks.application_id.title=Application ID +datasources.section.source-linnworks.application_secret.title=Application Secret +datasources.section.source-linnworks.start_date.title=Start Date +datasources.section.source-linnworks.token.title=API Token +datasources.section.source-linnworks.application_id.description=Linnworks Application ID +datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret +datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-looker.client_id.title=Client ID +datasources.section.source-looker.client_secret.title=Client Secret +datasources.section.source-looker.domain.title=Domain +datasources.section.source-looker.run_look_ids.title=Look IDs to Run +datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key. +datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key. 
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. harana.cloud.looker.com, looker.[clientname].com, or an IP address +datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional) +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key +datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key +datasources.section.source-mailchimp.credentials.title=Authentication * +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key. +datasources.section.source-mailgun.domain_region.title=Domain Region Code +datasources.section.source-mailgun.private_key.title=Private API Key +datasources.section.source-mailgun.start_date.title=Replication Start Date +datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'. +datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data. +datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago. +datasources.section.source-marketo.client_id.title=Client ID +datasources.section.source-marketo.client_secret.title=Client Secret +datasources.section.source-marketo.domain_url.title=Domain URL +datasources.section.source-marketo.start_date.title=Start Date +datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this. +datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID +datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0) +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID +datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft +datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism +datasources.section.source-microsoft-teams.period.title=Period +datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL +datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180. 
+datasources.section.source-mixpanel.api_secret.title=Project Secret +datasources.section.source-mixpanel.attribution_window.title=Attribution Window +datasources.section.source-mixpanel.date_window_size.title=Date slicing window +datasources.section.source-mixpanel.end_date.title=End Date +datasources.section.source-mixpanel.project_timezone.title=Project Timezone +datasources.section.source-mixpanel.region.title=Region +datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default +datasources.section.source-mixpanel.start_date.title=Start Date +datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this. +datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days. +datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days used to slice through the data. You can reduce it if the amount of data in each window is too big for your environment. +datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date. +datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console. +datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU. +datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise, new properties will be ignored. +datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default. +datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug +datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-monday.credentials.oneOf.1.title=API Token +datasources.section.source-monday.credentials.title=Authorization Method +datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. 
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests. +datasources.section.source-mongodb.auth_source.title=Authentication source +datasources.section.source-mongodb.database.title=Database name +datasources.section.source-mongodb.host.title=Host +datasources.section.source-mongodb.password.title=Password +datasources.section.source-mongodb.port.title=Port +datasources.section.source-mongodb.replica_set.title=Replica Set +datasources.section.source-mongodb.ssl.title=TLS connection +datasources.section.source-mongodb.user.title=User +datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info. +datasources.section.source-mongodb.database.description=Database to be replicated. +datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated. +datasources.section.source-mongodb.password.description=Password +datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated. +datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info. +datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB. +datasources.section.source-mongodb.user.description=User +datasources.section.source-mongodb-v2.auth_source.title=Authentication Source +datasources.section.source-mongodb-v2.database.title=Database Name +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses +datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type +datasources.section.source-mongodb-v2.password.title=Password +datasources.section.source-mongodb-v2.user.title=User +datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored. +datasources.section.source-mongodb-v2.database.description=The database you want to replicate. +datasources.section.source-mongodb-v2.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database. 
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by comma. +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to. +datasources.section.source-mongodb-v2.password.description=The password associated with this username. +datasources.section.source-mongodb-v2.user.description=The username which is used to access the database. +datasources.section.source-mssql.database.title=Database +datasources.section.source-mssql.host.title=Host +datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mssql.password.title=Password +datasources.section.source-mssql.port.title=Port +datasources.section.source-mssql.replication_method.oneOf.0.title=Standard +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level +datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mssql.replication_method.title=Replication Method +datasources.section.source-mssql.schemas.title=Schemas +datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.source-mssql.ssl_method.title=SSL Method +datasources.section.source-mssql.username.title=Username +datasources.section.source-mssql.database.description=The name of the database. +datasources.section.source-mssql.host.description=The hostname of the database. +datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mssql.password.description=The password associated with the username. +datasources.section.source-mssql.port.description=The port of the database. +datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC. +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database. +datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database. +datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.source-mssql.username.description=The username which is used to access the database. 
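The `jdbc_url_params` fields above (and the equivalent fields on the MySQL, Oracle, Postgres and Redshift sources) expect a single string of 'key=value' pairs joined with '&'. A minimal bash sketch of composing such a string; the individual parameter names are placeholders chosen for illustration, not options documented by any connector:

```
# Hypothetical example: compose a jdbc_url_params value from key=value pairs.
# The keys shown here are placeholders, not documented connector options.
PARAMS=("loginTimeout=30" "socketTimeout=60" "applicationName=harana")
JDBC_URL_PARAMS=$(IFS='&'; echo "${PARAMS[*]}")
echo "$JDBC_URL_PARAMS"   # prints: loginTimeout=30&socketTimeout=60&applicationName=harana
```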
+datasources.section.source-my-hours.email.title=Email +datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size +datasources.section.source-my-hours.password.title=Password +datasources.section.source-my-hours.start_date.title=Start Date +datasources.section.source-my-hours.email.description=Your My Hours username +datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days +datasources.section.source-my-hours.password.description=The password associated to the username +datasources.section.source-my-hours.start_date.description=Start date for collecting time logs +datasources.section.source-mysql.database.title=Database +datasources.section.source-mysql.host.title=Host +datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mysql.password.title=Password +datasources.section.source-mysql.port.title=Port +datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD +datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mysql.replication_method.title=Replication Method +datasources.section.source-mysql.ssl.title=SSL Connection +datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred +datasources.section.source-mysql.ssl_mode.oneOf.1.title=required +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password +datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password +datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity +datasources.section.source-mysql.ssl_mode.title=SSL modes +datasources.section.source-mysql.username.title=Username +datasources.section.source-mysql.database.description=The database name. +datasources.section.source-mysql.host.description=The host name of the database. +datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mysql.password.description=The password associated with the username. +datasources.section.source-mysql.port.description=The port to connect to. +datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database. +datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL. +datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
  • preferred - Automatically attempt SSL connection. If the MySQL server does not support SSL, continue with a regular connection.
  • required - Always connect with SSL. If the MySQL server doesn’t support SSL, the connection will not be established. Certificate Authority (CA) and Hostname are not verified.
  • verify-ca - Always connect with SSL. Verifies CA, but allows connection even if Hostname does not match.
  • Verify Identity - Always connect with SSL. Verify both CA and Hostname.
  • Read more in the docs. +datasources.section.source-mysql.ssl_mode.oneOf.0.description=Preferred SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.1.description=Require SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.description=Verify CA SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not add it, the password will be generated automatically. +datasources.section.source-mysql.ssl_mode.oneOf.3.description=Verify-full SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not add it, the password will be generated automatically. +datasources.section.source-mysql.username.description=The username which is used to access the database. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-notion.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-notion.credentials.oneOf.1.properties.token.title=Access Token +datasources.section.source-notion.credentials.oneOf.1.title=Access Token +datasources.section.source-notion.credentials.title=Authenticate using +datasources.section.source-notion.start_date.title=Start Date +datasources.section.source-notion.credentials.description=Pick an authentication method. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.description=The Access Token is a token you receive by completing the OAuth web flow of Notion. +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.description=The Client ID of your Notion integration. +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Notion integration. +datasources.section.source-notion.credentials.oneOf.1.properties.token.description=Notion API access token, see the docs for more information on how to obtain this token. +datasources.section.source-notion.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
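As a rough illustration of the SSL mode options listed above, the sketch below shows what a "Verify CA" configuration for the MySQL source might contain, using the field titles from this section (ssl_mode, ca_certificate, client_certificate, client_key). The surrounding JSON shape and the mode value are assumptions for illustration only, not the connector's exact schema:

```
# Hypothetical MySQL source config fragment illustrating the "Verify CA" SSL mode.
# Field names follow the titles above; the overall JSON layout and mode value are assumed.
cat <<'EOF' > /tmp/mysql-ssl-example.json
{
  "ssl_mode": {
    "mode": "verify_ca",
    "ca_certificate": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
    "client_certificate": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
    "client_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----"
  }
}
EOF
jq '.ssl_mode' /tmp/mysql-ssl-example.json
```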
+datasources.section.source-okta.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-okta.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-okta.credentials.oneOf.1.title=API Token +datasources.section.source-okta.credentials.title=Authorization Method * +datasources.section.source-okta.domain.title=Okta domain +datasources.section.source-okta.start_date.title=Start Date +datasources.section.source-okta.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain a new Access Token when it expires. +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.description=An Okta token. See the docs for instructions on how to generate it. +datasources.section.source-okta.domain.description=The Okta domain. See the docs for instructions on how to find it. +datasources.section.source-okta.start_date.description=UTC date and time in the format YYYY-MM-DDTHH:MM:SSZ. Any data before this date will not be replicated. +datasources.section.source-onesignal.outcome_names.title=Outcome Names +datasources.section.source-onesignal.start_date.title=Start Date +datasources.section.source-onesignal.user_auth_key.title=User Auth Key +datasources.section.source-onesignal.outcome_names.description=Comma-separated list of names and the value (sum/count) for the returned outcome data. See the docs for more details. +datasources.section.source-onesignal.start_date.description=The date from which you'd like to replicate data for OneSignal API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-onesignal.user_auth_key.description=OneSignal User Auth Key, see the docs for more information on how to obtain this key. +datasources.section.source-openweather.appid.title=App ID +datasources.section.source-openweather.lang.title=Language +datasources.section.source-openweather.lat.title=Latitude +datasources.section.source-openweather.lon.title=Longitude +datasources.section.source-openweather.units.title=Units +datasources.section.source-openweather.appid.description=Your OpenWeather API Key. See here. The key is case sensitive. +datasources.section.source-openweather.lang.description=You can use the lang parameter to get the output in your language. The contents of the description field will be translated. See here for the list of supported languages. +datasources.section.source-openweather.lat.description=Latitude of the location you want to get the weather condition for (min -90, max 90). +datasources.section.source-openweather.lon.description=Longitude of the location you want to get the weather condition for (min -180, max 180). +datasources.section.source-openweather.units.description=Units of measurement. standard, metric and imperial units are available. If you do not use the units parameter, standard units will be applied by default. 
+datasources.section.source-oracle.connection_data.oneOf.0.properties.service_name.title=Service name +datasources.section.source-oracle.connection_data.oneOf.0.title=Service name +datasources.section.source-oracle.connection_data.oneOf.1.properties.sid.title=System ID (SID) +datasources.section.source-oracle.connection_data.oneOf.1.title=System ID (SID) +datasources.section.source-oracle.connection_data.title=Connect by +datasources.section.source-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.source-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM File +datasources.section.source-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.source-oracle.encryption.title=Encryption +datasources.section.source-oracle.host.title=Host +datasources.section.source-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.source-oracle.password.title=Password +datasources.section.source-oracle.port.title=Port +datasources.section.source-oracle.schemas.title=Schemas +datasources.section.source-oracle.username.title=User +datasources.section.source-oracle.connection_data.description=Connection data that will be used for the DB connection +datasources.section.source-oracle.connection_data.oneOf.0.description=Use service name +datasources.section.source-oracle.connection_data.oneOf.1.description=Use SID (Oracle System Identifier) +datasources.section.source-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.source-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines what encryption algorithm is used. +datasources.section.source-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.source-oracle.host.description=Hostname of the database. +datasources.section.source-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-oracle.password.description=The password associated with the username. +datasources.section.source-oracle.port.description=Port of the database. +datasources.section.source-oracle.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-oracle.username.description=The username which is used to access the database. 
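The "Connect by" choice above (service name vs. SID) corresponds to the two common Oracle thin-driver JDBC URL shapes. A hedged sketch with host, port, service name and SID as placeholders; how the connector itself assembles the final URL is not specified here:

```
# Illustrative only: the two usual Oracle thin-driver URL forms that map to
# the "Connect by" options (service name vs. SID). Values are placeholders.
HOST="db.example.com"
PORT=1521
echo "jdbc:oracle:thin:@//${HOST}:${PORT}/MY_SERVICE"   # connect by service name
echo "jdbc:oracle:thin:@${HOST}:${PORT}:MYSID"          # connect by SID
```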
+datasources.section.source-orb.api_key.title=Orb API Key +datasources.section.source-orb.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-orb.numeric_event_properties_keys.title=Event properties keys (numeric values) +datasources.section.source-orb.start_date.title=Start Date +datasources.section.source-orb.string_event_properties_keys.title=Event properties keys (string values) +datasources.section.source-orb.api_key.description=Orb API Key, issued from the Orb admin console. +datasources.section.source-orb.lookback_window_days.description=When set to N, the connector will always refresh resources created within the past N days. By default, updated objects that are not newly created are not incrementally synced. +datasources.section.source-orb.numeric_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-orb.start_date.description=UTC date and time in the format 2022-03-01T00:00:00Z. Any data with created_at before this date will not be synced. +datasources.section.source-orb.string_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-outreach.client_id.title=Client ID +datasources.section.source-outreach.client_secret.title=Client Secret +datasources.section.source-outreach.redirect_uri.title=Redirect URI +datasources.section.source-outreach.refresh_token.title=Refresh Token +datasources.section.source-outreach.start_date.title=Start Date +datasources.section.source-outreach.client_id.description=The Client ID of your Outreach developer application. +datasources.section.source-outreach.client_secret.description=The Client Secret of your Outreach developer application. +datasources.section.source-outreach.redirect_uri.description=A Redirect URI is the location where the authorization server sends the user once the app has been successfully authorized and granted an authorization code or access token. +datasources.section.source-outreach.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-outreach.start_date.description=The date from which you'd like to replicate data for Outreach API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-pardot.client_id.description=The Consumer Key that can be found when viewing your app in Salesforce +datasources.section.source-pardot.client_secret.description=The Consumer Secret that can be found when viewing your app in Salesforce +datasources.section.source-pardot.is_sandbox.description=Whether or not the app is in a Salesforce sandbox. If you do not know what this is, assume it is false. +datasources.section.source-pardot.pardot_business_unit_id.description=Pardot Business ID, which can be found at Setup > Pardot > Pardot Account Setup +datasources.section.source-pardot.refresh_token.description=Salesforce Refresh Token used for harana to access your Salesforce account. If you don't know what this is, follow this guide to retrieve it. +datasources.section.source-pardot.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
Leave blank to skip this filter +datasources.section.source-paypal-transaction.client_id.title=Client ID +datasources.section.source-paypal-transaction.client_secret.title=Client secret +datasources.section.source-paypal-transaction.is_sandbox.title=Sandbox +datasources.section.source-paypal-transaction.refresh_token.title=Refresh token +datasources.section.source-paypal-transaction.start_date.title=Start Date +datasources.section.source-paypal-transaction.client_id.description=The Client ID of your Paypal developer application. +datasources.section.source-paypal-transaction.client_secret.description=The Client Secret of your Paypal developer application. +datasources.section.source-paypal-transaction.is_sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-paypal-transaction.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-paypal-transaction.start_date.description=Start Date for data extraction in ISO format. The date must be within the range from 3 years ago to 12 hours before the present time. +datasources.section.source-paystack.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-paystack.secret_key.title=Secret Key +datasources.section.source-paystack.start_date.title=Start Date +datasources.section.source-paystack.lookback_window_days.description=When set, the connector will always reload data from the past N days, where N is the value set here. This is useful if your data is updated after creation. +datasources.section.source-paystack.secret_key.description=The Paystack API key (usually starts with 'sk_live_'; find yours here). +datasources.section.source-paystack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-persistiq.api_key.description=PersistIq API Key. See the docs for more information on where to find that key. +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pinterest.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-pinterest.credentials.oneOf.1.title=Access Token +datasources.section.source-pinterest.credentials.title=Authorization Method +datasources.section.source-pinterest.start_date.title=Start Date +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain a new Access Token when it expires. +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests. +datasources.section.source-pinterest.start_date.description=A date in the format YYYY-MM-DD. If you have not set a date, it will default to 2020-07-28. 
+datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pipedrive.authorization.oneOf.0.title=Sign in via Pipedrive (OAuth) +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.title=API Token +datasources.section.source-pipedrive.authorization.oneOf.1.title=API Key Authentication +datasources.section.source-pipedrive.authorization.title=Authentication Type +datasources.section.source-pipedrive.replication_start_date.title=Start Date +datasources.section.source-pipedrive.authorization.description=Choose one of the possible authorization methods +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.description=The Client ID of your Pipedrive developer application. +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.description=The Client Secret of your Pipedrive developer application. +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.description=The Pipedrive API Token. +datasources.section.source-pipedrive.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. When specified and not None, the stream will behave as incremental +datasources.section.source-pivotal-tracker.api_token.description=Pivotal Tracker API token +datasources.section.source-plaid.access_token.title=Access Token +datasources.section.source-plaid.api_key.title=API Key +datasources.section.source-plaid.client_id.title=Client ID +datasources.section.source-plaid.plaid_env.title=Plaid Environment +datasources.section.source-plaid.start_date.title=Start Date +datasources.section.source-plaid.access_token.description=The end-user's Link access token. +datasources.section.source-plaid.api_key.description=The Plaid API key to use to hit the API. +datasources.section.source-plaid.client_id.description=The Plaid client ID. +datasources.section.source-plaid.plaid_env.description=The Plaid environment. +datasources.section.source-plaid.start_date.description=The date from which you'd like to replicate data for Plaid in the format YYYY-MM-DD. All data generated after this date will be replicated. +datasources.section.source-pokeapi.pokemon_name.title=Pokemon Name +datasources.section.source-pokeapi.pokemon_name.description=Pokemon requested from the API. 
+datasources.section.source-postgres.database.title=Database Name +datasources.section.source-postgres.host.title=Host +datasources.section.source-postgres.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-postgres.password.title=Password +datasources.section.source-postgres.port.title=Port +datasources.section.source-postgres.replication_method.oneOf.0.title=Standard +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.title=Initial Waiting Time in Seconds (Advanced) +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.title=Plugin +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.title=Publication +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.title=Replication Slot +datasources.section.source-postgres.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-postgres.replication_method.title=Replication Method +datasources.section.source-postgres.schemas.title=Schemas +datasources.section.source-postgres.ssl.title=Connect using SSL +datasources.section.source-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.source-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.source-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.source-postgres.ssl_mode.oneOf.3.title=require +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.title=Client Certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.title=Client Key +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password +datasources.section.source-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA Certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client Certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client Key +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password +datasources.section.source-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.source-postgres.ssl_mode.title=SSL Modes +datasources.section.source-postgres.username.title=Username +datasources.section.source-postgres.database.description=Name of the database. +datasources.section.source-postgres.host.description=Hostname of the database. +datasources.section.source-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-postgres.password.description=Password associated with the username. +datasources.section.source-postgres.port.description=Port of the database. +datasources.section.source-postgres.replication_method.description=Replication method for extracting data from the database. +datasources.section.source-postgres.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. 
+datasources.section.source-postgres.replication_method.oneOf.1.description=Logical replication uses the Postgres write-ahead log (WAL) to detect inserts, updates, and deletes. This needs to be configured on the source database itself. Only available on Postgres 10 and above. Read the docs. +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.description=The amount of time the connector will wait when it launches to determine if there is new data to sync or not. Defaults to 300 seconds. Valid range: 120 seconds to 1200 seconds. Read about initial waiting time. +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.description=A logical decoding plugin installed on the PostgreSQL server. The `pgoutput` plugin is used by default. If the replication table contains a lot of big jsonb values it is recommended to use `wal2json` plugin. Read more about selecting replication plugins. +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.description=A Postgres publication used for consuming changes. Read about publications and replication identities. +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.description=A plugin logical replication slot. Read about replication slots. +datasources.section.source-postgres.schemas.description=The list of schemas (case sensitive) to sync from. Defaults to public. +datasources.section.source-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.source-postgres.ssl_mode.description=SSL connection modes. +datasources.section.source-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.source-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. If you do not add it - the password will be generated automatically. +datasources.section.source-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. If you do not add it - the password will be generated automatically. +datasources.section.source-postgres.username.description=Username to access the database. +datasources.section.source-posthog.api_key.title=API Key +datasources.section.source-posthog.base_url.title=Base URL +datasources.section.source-posthog.start_date.title=Start Date +datasources.section.source-posthog.api_key.description=API Key. 
See the docs for information on how to generate this key. +datasources.section.source-posthog.base_url.description=Base PostHog url. Defaults to PostHog Cloud (https://app.posthog.com). +datasources.section.source-posthog.start_date.description=The date from which you'd like to replicate the data. Any data before this date will not be replicated. +datasources.section.source-prestashop.access_key.description=Your PrestaShop access key. See the docs for info on how to obtain this. +datasources.section.source-prestashop.url.description=Shop URL without trailing slash (domain name or IP address) +datasources.section.source-qualaroo.key.title=API key +datasources.section.source-qualaroo.start_date.title=Start Date +datasources.section.source-qualaroo.survey_ids.title=Qualaroo survey IDs +datasources.section.source-qualaroo.token.title=API token +datasources.section.source-qualaroo.key.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-qualaroo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-qualaroo.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated. +datasources.section.source-qualaroo.token.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-quickbooks-singer.client_id.title=Client ID +datasources.section.source-quickbooks-singer.client_secret.title=Client Secret +datasources.section.source-quickbooks-singer.realm_id.title=Realm ID +datasources.section.source-quickbooks-singer.refresh_token.title=Refresh Token +datasources.section.source-quickbooks-singer.sandbox.title=Sandbox +datasources.section.source-quickbooks-singer.start_date.title=Start Date +datasources.section.source-quickbooks-singer.user_agent.title=User Agent +datasources.section.source-quickbooks-singer.client_id.description=Identifies which app is making the request. Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.client_secret.description= Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.realm_id.description=Labeled Company ID. The Make API Calls panel is populated with the realm id and the current access token. +datasources.section.source-quickbooks-singer.refresh_token.description=A token used when refreshing the access token. +datasources.section.source-quickbooks-singer.sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-quickbooks-singer.start_date.description=The default value to use if no bookmark exists for an endpoint (rfc3339 date string). E.g, 2021-03-20T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-quickbooks-singer.user_agent.description=Process and email for API logging purposes. Example: tap-quickbooks . +datasources.section.source-recharge.access_token.title=Access Token +datasources.section.source-recharge.start_date.title=Start Date +datasources.section.source-recharge.access_token.description=The value of the Access Token generated. See the docs for more information. 
+datasources.section.source-recharge.start_date.description=The date from which you'd like to replicate data for Recharge API, in the format YYYY-MM-DDT00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-recurly.api_key.title=API Key +datasources.section.source-recurly.api_key.description=Recurly API Key. See the docs for more information on how to generate this key. +datasources.section.source-recurly.begin_time.description=ISO8601 timestamp from which the replication from Recurly API will start from. +datasources.section.source-recurly.end_time.description=ISO8601 timestamp to which the replication from Recurly API will stop. Records after that date won't be imported. +datasources.section.source-redshift.database.title=Database +datasources.section.source-redshift.host.title=Host +datasources.section.source-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.source-redshift.password.title=Password +datasources.section.source-redshift.port.title=Port +datasources.section.source-redshift.schemas.title=Schemas +datasources.section.source-redshift.username.title=Username +datasources.section.source-redshift.database.description=Name of the database. +datasources.section.source-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com). +datasources.section.source-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-redshift.password.description=Password associated with the username. +datasources.section.source-redshift.port.description=Port of the database. +datasources.section.source-redshift.schemas.description=The list of schemas to sync from. Specify one or more explicitly or keep empty to process all schemas. Schema names are case sensitive. +datasources.section.source-redshift.username.description=Username to use to access the database. +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-retently.credentials.oneOf.0.title=Authenticate via Retently (OAuth) +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.title=API Token +datasources.section.source-retently.credentials.oneOf.1.title=Authenticate with API Token +datasources.section.source-retently.credentials.title=Authentication Mechanism +datasources.section.source-retently.credentials.description=Choose how to authenticate to Retently +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.description=The Client ID of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.description=Retently Refresh Token which can be used to fetch new Bearer Tokens when the current one expires. +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.description=Retently API Token. See the docs for more information on how to obtain this key. 
+datasources.section.source-rki-covid.start_date.title=Start Date +datasources.section.source-rki-covid.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-s3.dataset.title=Output Stream Name +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.title=Additional Reader Options +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.title=Advanced Options +datasources.section.source-s3.format.oneOf.0.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.0.properties.delimiter.title=Delimiter +datasources.section.source-s3.format.oneOf.0.properties.double_quote.title=Double Quote +datasources.section.source-s3.format.oneOf.0.properties.encoding.title=Encoding +datasources.section.source-s3.format.oneOf.0.properties.escape_char.title=Escape Character +datasources.section.source-s3.format.oneOf.0.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.title=Infer Datatypes +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.0.properties.quote_char.title=Quote Character +datasources.section.source-s3.format.oneOf.0.title=CSV +datasources.section.source-s3.format.oneOf.1.properties.batch_size.title=Record batch size +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.title=Buffer Size +datasources.section.source-s3.format.oneOf.1.properties.columns.title=Selected Columns +datasources.section.source-s3.format.oneOf.1.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.1.title=Parquet +datasources.section.source-s3.format.oneOf.2.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.2.title=Avro +datasources.section.source-s3.format.oneOf.3.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.3.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.title=UnexpectedFieldBehaviorEnum +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.title=Unexpected field behavior +datasources.section.source-s3.format.oneOf.3.title=Jsonl +datasources.section.source-s3.format.title=File Format +datasources.section.source-s3.path_pattern.title=Pattern of files to replicate +datasources.section.source-s3.provider.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-s3.provider.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-s3.provider.properties.bucket.title=Bucket +datasources.section.source-s3.provider.properties.endpoint.title=Endpoint +datasources.section.source-s3.provider.properties.path_prefix.title=Path Prefix +datasources.section.source-s3.provider.properties.use_ssl.title=Use TLS +datasources.section.source-s3.provider.properties.verify_ssl_cert.title=Verify TLS Certificates +datasources.section.source-s3.provider.title=S3: Amazon Web Services +datasources.section.source-s3.schema.title=Manually enforced data schema +datasources.section.source-s3.dataset.description=The name of the stream you would like this source to output. Can contain letters, numbers, or underscores. 
+datasources.section.source-s3.format.description=The format of the files you'd like to replicate +datasources.section.source-s3.format.oneOf.0.description=This connector utilises PyArrow (Apache Arrow) for CSV parsing. +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.description=Optionally add a valid JSON string here to provide additional options to the csv reader. Mappings must correspond to options detailed here. 'column_types' is used internally to handle schema so overriding that would likely cause problems. +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.description=Optionally add a valid JSON string here to provide additional Pyarrow ReadOptions. Specify 'column_names' here if your CSV doesn't have a header, or if you want to use custom column names. 'block_size' and 'encoding' are already used above; specifying them again here will override the values above. +datasources.section.source-s3.format.oneOf.0.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.0.properties.delimiter.description=The character delimiting individual cells in the CSV data. This may only be a 1-character string. For tab-delimited data enter '\t'. +datasources.section.source-s3.format.oneOf.0.properties.double_quote.description=Whether two quotes in a quoted CSV value denote a single quote in the data. +datasources.section.source-s3.format.oneOf.0.properties.encoding.description=The character encoding of the CSV data. Leave blank to default to UTF8. See list of python encodings for allowable options. +datasources.section.source-s3.format.oneOf.0.properties.escape_char.description=The character used for escaping special characters. To disallow escaping, leave this field blank. +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.description=Configures whether a schema for the source should be inferred from the current data or not. If set to false and a custom schema is set, then the manually enforced schema is used. If a schema is not manually set, and this is set to false, then all fields will be read as strings. +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.description=Whether newline characters are allowed in CSV values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.0.properties.quote_char.description=The character used for quoting CSV values. To disallow quoting, leave this field blank. +datasources.section.source-s3.format.oneOf.1.description=This connector utilises PyArrow (Apache Arrow) for Parquet parsing. +datasources.section.source-s3.format.oneOf.1.properties.batch_size.description=Maximum number of records per batch read from the input files. Batches may be smaller if there aren’t enough rows in the file. This option can help avoid out-of-memory errors if your data is particularly wide. +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.description=Perform read buffering when deserializing individual column chunks. By default every group column will be loaded fully to memory. This option can help avoid out-of-memory errors if your data is particularly wide.
+datasources.section.source-s3.format.oneOf.1.properties.columns.description=If you only want to sync a subset of the columns from the file(s), add the columns you want here as a comma-delimited list. Leave it empty to sync all columns. +datasources.section.source-s3.format.oneOf.2.description=This connector utilises fastavro for Avro parsing. +datasources.section.source-s3.format.oneOf.3.description=This connector uses PyArrow for JSON Lines (jsonl) file parsing. +datasources.section.source-s3.format.oneOf.3.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.description=Whether newline characters are allowed in JSON values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.description=An enumeration. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.description=How JSON fields outside of explicit_schema (if given) are treated. Check PyArrow documentation for details +datasources.section.source-s3.path_pattern.description=A regular expression which tells the connector which files to replicate. All files which match this pattern will be replicated. Use | to separate multiple patterns. See this page to understand pattern syntax (GLOBSTAR and SPLIT flags are enabled). Use pattern ** to pick up all files. +datasources.section.source-s3.provider.description=Use this to load files from S3 or S3-compatible services +datasources.section.source-s3.provider.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.bucket.description=Name of the S3 bucket where the file(s) exist. +datasources.section.source-s3.provider.properties.endpoint.description=Endpoint to an S3 compatible service. Leave empty to use AWS. +datasources.section.source-s3.provider.properties.path_prefix.description=By providing a path-like prefix (e.g. myFolder/thisTable/) under which all the relevant files sit, we can optimize finding these in S3. This is optional but recommended if your bucket contains many folders/files which you don't need to replicate. +datasources.section.source-s3.provider.properties.use_ssl.description=Whether the remote server is using a secure SSL/TLS connection. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.provider.properties.verify_ssl_cert.description=Set this to false to allow self signed certificates. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.schema.description=Optionally provide a schema to enforce, as a valid JSON string. Ensure this is a mapping of { "column" : "type" }, where types are valid JSON Schema datatypes. Leave as {} to auto-infer the schema. 
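+# Example (hypothetical values) for the source-s3 path_pattern and schema fields described above:
+#   path_pattern: csv/**|reports/*.csv
+#   schema: { "id": "integer", "name": "string", "created_at": "string" }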
+datasources.section.source-salesloft.client_id.title=Client ID +datasources.section.source-salesloft.client_secret.title=Client Secret +datasources.section.source-salesloft.refresh_token.title=Refresh Token +datasources.section.source-salesloft.start_date.title=Start Date +datasources.section.source-salesloft.client_id.description=The Client ID of your Salesloft developer application. +datasources.section.source-salesloft.client_secret.description=The Client Secret of your Salesloft developer application. +datasources.section.source-salesloft.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-salesloft.start_date.description=The date from which you'd like to replicate data for Salesloft API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-search-metrics.api_key.title=API Key +datasources.section.source-search-metrics.client_secret.title=Client Secret +datasources.section.source-search-metrics.country_code.title=Country Code +datasources.section.source-search-metrics.start_date.title=Start Date +datasources.section.source-search-metrics.api_key.description= +datasources.section.source-search-metrics.client_secret.description= +datasources.section.source-search-metrics.country_code.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.source-search-metrics.start_date.description=Data generated in SearchMetrics after this date will be replicated. This date must be specified in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-sendgrid.apikey.title=Sendgrid API key +datasources.section.source-sendgrid.start_time.title=Start time +datasources.section.source-sendgrid.apikey.description=API Key, use admin to generate this key. +datasources.section.source-sendgrid.start_time.description=Start time in timestamp integer format. Any data before this timestamp will not be replicated. +datasources.section.source-sentry.auth_token.title=Authentication Tokens +datasources.section.source-sentry.hostname.title=Host Name +datasources.section.source-sentry.organization.title=Organization +datasources.section.source-sentry.project.title=Project +datasources.section.source-sentry.auth_token.description=Log into Sentry and then create authentication tokens. For self-hosted, you can find or create authentication tokens by visiting "{instance_url_prefix}/settings/account/api/auth-tokens/" +datasources.section.source-sentry.hostname.description=Host name of Sentry API server. For self-hosted, specify your host name here. Otherwise, leave it empty. +datasources.section.source-sentry.organization.description=The slug of the organization the groups belong to. +datasources.section.source-sentry.project.description=The name (slug) of the Project you want to sync.
+datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.title=Password +datasources.section.source-sftp.credentials.oneOf.0.title=Password Authentication +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.title=SSH Private Key +datasources.section.source-sftp.credentials.oneOf.1.title=SSH Key Authentication +datasources.section.source-sftp.credentials.title=Authentication * +datasources.section.source-sftp.file_pattern.title=File Pattern +datasources.section.source-sftp.file_types.title=File types +datasources.section.source-sftp.folder_path.title=Folder Path +datasources.section.source-sftp.host.title=Host Address +datasources.section.source-sftp.port.title=Port +datasources.section.source-sftp.user.title=User Name +datasources.section.source-sftp.credentials.description=The server authentication method +datasources.section.source-sftp.credentials.oneOf.0.properties.auth_method.description=Connect through password authentication +datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.description=OS-level password for logging into the jump server host +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_method.description=Connect through ssh key +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.description=OS-level user account ssh key credentials in RSA PEM format (created with ssh-keygen -t rsa -m PEM -f myuser_rsa) +datasources.section.source-sftp.file_pattern.description=The regular expression to specify files for sync in a chosen Folder Path +datasources.section.source-sftp.file_types.description=Comma-separated file types. Currently only 'csv' and 'json' types are supported. +datasources.section.source-sftp.folder_path.description=The directory to search files for sync +datasources.section.source-sftp.host.description=The server host address +datasources.section.source-sftp.port.description=The server port +datasources.section.source-sftp.user.description=The server user +datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-shopify.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.title=API Password +datasources.section.source-shopify.credentials.oneOf.1.title=API Password +datasources.section.source-shopify.credentials.title=Shopify Authorization Method +datasources.section.source-shopify.shop.title=Shopify Store +datasources.section.source-shopify.start_date.title=Replication Start Date +datasources.section.source-shopify.credentials.description=The authorization method to use to retrieve data from Shopify +datasources.section.source-shopify.credentials.oneOf.0.description=OAuth2.0 +datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.description=The Access Token for making authenticated requests. +datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.description=The Client ID of the Shopify developer application. +datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the Shopify developer application.
+datasources.section.source-shopify.credentials.oneOf.1.description=API Password Auth +datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.description=The API Password for your private application in the `Shopify` store. +datasources.section.source-shopify.shop.description=The name of your Shopify store found in the URL. For example, if your URL was https://NAME.myshopify.com, then the name would be 'NAME'. +datasources.section.source-shopify.start_date.description=The date you would like to replicate data from. Format: YYYY-MM-DD. Any data before this date will not be replicated. +datasources.section.source-shortio.domain_id.title=Domain ID +datasources.section.source-shortio.secret_key.title=Secret Key +datasources.section.source-shortio.start_date.title=Start Date +datasources.section.source-shortio.secret_key.description=Short.io Secret Key +datasources.section.source-shortio.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-slack.channel_filter.title=Channel name filter +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.title=Access token +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-slack.credentials.oneOf.0.title=Sign in via Slack (OAuth) +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-slack.credentials.oneOf.1.title=API Token +datasources.section.source-slack.credentials.title=Authentication mechanism +datasources.section.source-slack.join_channels.title=Join all channels +datasources.section.source-slack.lookback_window.title=Threads Lookback window (Days) +datasources.section.source-slack.start_date.title=Start Date +datasources.section.source-slack.channel_filter.description=A channel name list (without leading '#' char) which limit the channels from which you'd like to sync. Empty list means no filter. +datasources.section.source-slack.credentials.description=Choose how to authenticate into Slack +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.description=Slack access_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.description=Slack client_id. See our docs if you need help finding this id. +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.description=Slack client_secret. See our docs if you need help finding this secret. +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.description=Slack refresh_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.description=A Slack bot token. See the docs for instructions on how to generate it. +datasources.section.source-slack.join_channels.description=Whether to join all channels or to sync data only from channels the bot is already in. If false, you'll need to manually add the bot to all the channels from which you'd like to sync messages. +datasources.section.source-slack.lookback_window.description=How far into the past to look for messages in threads. 
+datasources.section.source-slack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-smartsheets.access_token.title=Access Token +datasources.section.source-smartsheets.spreadsheet_id.title=Sheet ID +datasources.section.source-smartsheets.start_datetime.title=Start Datetime +datasources.section.source-smartsheets.access_token.description=The access token to use for accessing your data from Smartsheets. This access token must be generated by a user with at least read access to the data you'd like to replicate. Generate an access token in the Smartsheets main menu by clicking Account > Apps & Integrations > API Access. See the setup guide for information on how to obtain this token. +datasources.section.source-smartsheets.spreadsheet_id.description=The spreadsheet ID. Find it by opening the spreadsheet then navigating to File > Properties +datasources.section.source-smartsheets.start_datetime.description=Only rows modified after this date/time will be replicated. This should be an ISO 8601 string, for instance: `2000-01-01T13:00:00` +datasources.section.source-snapchat-marketing.client_id.title=Client ID +datasources.section.source-snapchat-marketing.client_secret.title=Client Secret +datasources.section.source-snapchat-marketing.end_date.title=End Date +datasources.section.source-snapchat-marketing.refresh_token.title=Refresh Token +datasources.section.source-snapchat-marketing.start_date.title=Start Date +datasources.section.source-snapchat-marketing.client_id.description=The Client ID of your Snapchat developer application. +datasources.section.source-snapchat-marketing.client_secret.description=The Client Secret of your Snapchat developer application. +datasources.section.source-snapchat-marketing.end_date.description=Date in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-snapchat-marketing.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-snapchat-marketing.start_date.description=Date in the format 2022-01-01. Any data before this date will not be replicated. +datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.title=Password +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.title=Username +datasources.section.source-snowflake.credentials.oneOf.1.title=Username and Password +datasources.section.source-snowflake.credentials.title=Authorization Method +datasources.section.source-snowflake.database.title=Database +datasources.section.source-snowflake.host.title=Account Name +datasources.section.source-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.source-snowflake.role.title=Role +datasources.section.source-snowflake.schema.title=Schema +datasources.section.source-snowflake.warehouse.title=Warehouse +datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. 
+datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.description=The Client ID of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.description=Refresh Token for making authenticated requests. +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.description=The password associated with the username. +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.description=The username you created to allow harana to access the database. +datasources.section.source-snowflake.database.description=The database you created for harana to access data. +datasources.section.source-snowflake.host.description=The host domain of the snowflake instance (must include the account, region, cloud environment, and end with snowflakecomputing.com). +datasources.section.source-snowflake.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-snowflake.role.description=The role you created for harana to access Snowflake. +datasources.section.source-snowflake.schema.description=The source Snowflake schema tables. +datasources.section.source-snowflake.warehouse.description=The warehouse you created for harana to access data. +datasources.section.source-square.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-square.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-square.credentials.oneOf.0.title=Oauth authentication +datasources.section.source-square.credentials.oneOf.1.properties.api_key.title=API key token +datasources.section.source-square.credentials.oneOf.1.title=API Key +datasources.section.source-square.credentials.title=Credential Type +datasources.section.source-square.include_deleted_objects.title=Include Deleted Objects +datasources.section.source-square.is_sandbox.title=Sandbox +datasources.section.source-square.start_date.title=Start Date +datasources.section.source-square.credentials.oneOf.0.properties.client_id.description=The Square-issued ID of your application +datasources.section.source-square.credentials.oneOf.0.properties.client_secret.description=The Square-issued application secret for your application +datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.description=A refresh token generated using the above client ID and secret +datasources.section.source-square.credentials.oneOf.1.properties.api_key.description=The API key for a Square application +datasources.section.source-square.include_deleted_objects.description=In some streams there is an option to include deleted objects (Items, Categories, Discounts, Taxes) +datasources.section.source-square.is_sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-square.start_date.description=UTC date in the format YYYY-MM-DD. Any data before this date will not be replicated. If not set, all data will be replicated. 
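+# Example (hypothetical account) of the host format expected by the source-snowflake host field described above:
+#   host: xy12345.us-east-2.aws.snowflakecomputing.com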
+datasources.section.source-strava.athlete_id.title=Athlete ID +datasources.section.source-strava.client_id.title=Client ID +datasources.section.source-strava.client_secret.title=Client Secret +datasources.section.source-strava.refresh_token.title=Refresh Token +datasources.section.source-strava.start_date.title=Start Date +datasources.section.source-strava.athlete_id.description=The Athlete ID of your Strava developer application. +datasources.section.source-strava.client_id.description=The Client ID of your Strava developer application. +datasources.section.source-strava.client_secret.description=The Client Secret of your Strava developer application. +datasources.section.source-strava.refresh_token.description=The Refresh Token with the activity: read_all permissions. +datasources.section.source-strava.start_date.description=UTC date and time. Any data before this date will not be replicated. +datasources.section.source-surveymonkey.access_token.title=Access Token +datasources.section.source-surveymonkey.start_date.title=Start Date +datasources.section.source-surveymonkey.survey_ids.title=Survey Monkey survey IDs +datasources.section.source-surveymonkey.access_token.description=Access Token for making authenticated requests. See the docs for information on how to generate this key. +datasources.section.source-surveymonkey.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-surveymonkey.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated. +datasources.section.source-talkdesk-explore.api_key.title=API KEY +datasources.section.source-talkdesk-explore.auth_url.title=AUTH URL +datasources.section.source-talkdesk-explore.start_date.title=START DATE +datasources.section.source-talkdesk-explore.timezone.title=TIMEZONE +datasources.section.source-talkdesk-explore.api_key.description=Talkdesk API key. +datasources.section.source-talkdesk-explore.auth_url.description=Talkdesk Auth URL. Only 'client_credentials' auth type supported at the moment. +datasources.section.source-talkdesk-explore.start_date.description=The date from which you'd like to replicate data for Talkdesk Explore API, in the format YYYY-MM-DDT00:00:00. All data generated after this date will be replicated. +datasources.section.source-talkdesk-explore.timezone.description=Timezone to use when generating reports. Only IANA timezones are supported (https://nodatime.org/TimeZones) +datasources.section.source-tempo.api_token.title=API token +datasources.section.source-tempo.api_token.description=Tempo API Token. Go to Tempo>Settings, scroll down to Data Access and select API integration. +datasources.section.source-tidb.database.title=Database +datasources.section.source-tidb.host.title=Host +datasources.section.source-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.source-tidb.password.title=Password +datasources.section.source-tidb.port.title=Port +datasources.section.source-tidb.ssl.title=SSL Connection +datasources.section.source-tidb.username.title=Username +datasources.section.source-tidb.database.description=Name of the database. +datasources.section.source-tidb.host.description=Hostname of the database.
+datasources.section.source-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3) +datasources.section.source-tidb.password.description=Password associated with the username. +datasources.section.source-tidb.port.description=Port of the database. +datasources.section.source-tidb.ssl.description=Encrypt data using SSL. +datasources.section.source-tidb.username.description=Username to use to access the database. +datasources.section.source-timely.account_id.title=account_id +datasources.section.source-timely.bearer_token.title=Bearer token +datasources.section.source-timely.start_date.title=startDate +datasources.section.source-timely.account_id.description=Timely account id +datasources.section.source-timely.bearer_token.description=Timely bearer token +datasources.section.source-timely.start_date.description=start date +datasources.section.source-tplcentral.client_id.title=Client ID +datasources.section.source-tplcentral.client_secret.title=Client secret +datasources.section.source-tplcentral.customer_id.title=Customer ID +datasources.section.source-tplcentral.facility_id.title=Facility ID +datasources.section.source-tplcentral.start_date.title=Start date +datasources.section.source-tplcentral.tpl_key.title=3PL GUID +datasources.section.source-tplcentral.url_base.title=URL base +datasources.section.source-tplcentral.user_login.title=User login name +datasources.section.source-tplcentral.user_login_id.title=User login ID +datasources.section.source-tplcentral.start_date.description=Date and time together in RFC 3339 format, for example, 2018-11-13T20:20:39+00:00. +datasources.section.source-tplcentral.user_login.description=User login ID and/or name is required +datasources.section.source-tplcentral.user_login_id.description=User login ID and/or name is required +datasources.section.source-trello.board_ids.title=Trello Board IDs +datasources.section.source-trello.key.title=API key +datasources.section.source-trello.start_date.title=Start Date +datasources.section.source-trello.token.title=API token +datasources.section.source-trello.board_ids.description=IDs of the boards to replicate data from. If left empty, data from all boards to which you have access will be replicated. +datasources.section.source-trello.key.description=Trello API key. See the docs for instructions on how to generate it. +datasources.section.source-trello.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-trello.token.description=Trello v API token. See the docs for instructions on how to generate it. +datasources.section.source-twilio.account_sid.title=Account ID +datasources.section.source-twilio.auth_token.title=Auth Token +datasources.section.source-twilio.lookback_window.title=Lookback window +datasources.section.source-twilio.start_date.title=Replication Start Date +datasources.section.source-twilio.account_sid.description=Twilio account SID +datasources.section.source-twilio.auth_token.description=Twilio Auth Token. +datasources.section.source-twilio.lookback_window.description=How far into the past to look for records. (in minutes) +datasources.section.source-twilio.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-typeform.form_ids.title=Form IDs to replicate +datasources.section.source-typeform.start_date.title=Start Date +datasources.section.source-typeform.token.title=API Token +datasources.section.source-typeform.form_ids.description=When this parameter is set, the connector will replicate data only from the input forms. Otherwise, all forms in your Typeform account will be replicated. You can find form IDs in your form URLs. For example, in the URL "https://mysite.typeform.com/to/u6nXL7" the form_id is u6nXL7. You can find form URLs on Share panel +datasources.section.source-typeform.start_date.description=UTC date and time in the format: YYYY-MM-DDTHH:mm:ss[Z]. Any data before this date will not be replicated. +datasources.section.source-typeform.token.description=The API Token for a Typeform account. +datasources.section.source-us-census.api_key.description=Your API Key. Get your key here. +datasources.section.source-us-census.query_params.description=The query parameters portion of the GET request, without the api key +datasources.section.source-us-census.query_path.description=The path portion of the GET request +datasources.section.source-woocommerce.conversion_window_days.title=Conversion Window +datasources.section.source-woocommerce.api_key.description=The CUSTOMER KEY for API in WooCommerce shop. +datasources.section.source-woocommerce.api_secret.description=The CUSTOMER SECRET for API in WooCommerce shop. +datasources.section.source-woocommerce.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. +datasources.section.source-woocommerce.shop.description=The name of the store. For https://EXAMPLE.com, the shop name is 'EXAMPLE.com'. +datasources.section.source-woocommerce.start_date.description=The date you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-yahoo-finance-price.interval.title=Interval +datasources.section.source-yahoo-finance-price.range.title=Range +datasources.section.source-yahoo-finance-price.interval.description=The interval of between prices queried. +datasources.section.source-yahoo-finance-price.range.description=The range of prices to be queried. +datasources.section.source-yahoo-finance-price.tickers.description=Comma-separated identifiers for the stocks to be queried. Whitespaces are allowed. 
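+# Example (hypothetical request) for the source-us-census query_path and query_params fields described above:
+# for a GET request to https://api.census.gov/data/2019/acs/acs5?get=NAME,B01001_001E&for=state:*&key=YOUR_KEY
+# the query_path would be "data/2019/acs/acs5" and the query_params would be "get=NAME,B01001_001E&for=state:*".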
+datasources.section.source-youtube-analytics.credentials.properties.client_id.title=Client ID +datasources.section.source-youtube-analytics.credentials.properties.client_secret.title=Client Secret +datasources.section.source-youtube-analytics.credentials.properties.refresh_token.title=Refresh Token +datasources.section.source-youtube-analytics.credentials.title=Authenticate via OAuth 2.0 +datasources.section.source-youtube-analytics.credentials.properties.client_id.description=The Client ID of your developer application +datasources.section.source-youtube-analytics.credentials.properties.client_secret.description=The client secret of your developer application +datasources.section.source-youtube-analytics.credentials.properties.refresh_token.description=A refresh token generated using the above client ID and secret +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-zendesk-chat.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-zendesk-chat.credentials.oneOf.1.title=Access Token +datasources.section.source-zendesk-chat.credentials.title=Authorization Method +datasources.section.source-zendesk-chat.start_date.title=Start Date +datasources.section.source-zendesk-chat.subdomain.title=Subdomain +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain new Access Token, when it's expired. +datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests. +datasources.section.source-zendesk-chat.start_date.description=The date from which you'd like to replicate data for Zendesk Chat API, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-zendesk-chat.subdomain.description=Required if you access Zendesk Chat from a Zendesk Support subdomain. 
+datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-sunshine.credentials.title=Authorization Method +datasources.section.source-zendesk-sunshine.start_date.title=Start Date +datasources.section.source-zendesk-sunshine.subdomain.title=Subdomain +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.description=Long-term access Token for making authenticated requests. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.description=API Token. See the docs for information on how to generate this key. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account +datasources.section.source-zendesk-sunshine.start_date.description=The date from which you'd like to replicate data for Zendesk Sunshine API, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-zendesk-sunshine.subdomain.description=The subdomain for your Zendesk Account. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-support.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-support.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-support.credentials.title=Authentication * +datasources.section.source-zendesk-support.start_date.title=Start Date +datasources.section.source-zendesk-support.subdomain.title=Subdomain +datasources.section.source-zendesk-support.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-support.start_date.description=The date from which you'd like to replicate data for Zendesk Support API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. 
+datasources.section.source-zendesk-support.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.title=Email +datasources.section.source-zendesk-talk.credentials.oneOf.0.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.title=OAuth2.0 +datasources.section.source-zendesk-talk.credentials.title=Authentication +datasources.section.source-zendesk-talk.start_date.title=Start Date +datasources.section.source-zendesk-talk.subdomain.title=Subdomain +datasources.section.source-zendesk-talk.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.start_date.description=The date from which you'd like to replicate data for Zendesk Talk API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-zendesk-talk.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zenloop.api_token.description=Zenloop API Token. You can get the API token in settings page here +datasources.section.source-zenloop.date_from.description=Zenloop date_from. Format: 2021-10-24T03:30:30Z or 2021-10-24. Leave empty if only data from current data should be synced +datasources.section.source-zenloop.survey_group_id.description=Zenloop Survey Group ID. Can be found by pulling All Survey Groups via SurveyGroups stream. Leave empty to pull answers from all survey groups +datasources.section.source-zenloop.survey_id.description=Zenloop Survey ID. Can be found here. Leave empty to pull answers from all surveys +datasources.section.source-zoho-crm.client_id.title=Client ID +datasources.section.source-zoho-crm.client_secret.title=Client Secret +datasources.section.source-zoho-crm.dc_region.title=Data Center Location +datasources.section.source-zoho-crm.edition.title=Zoho CRM Edition +datasources.section.source-zoho-crm.environment.title=Environment +datasources.section.source-zoho-crm.refresh_token.title=Refresh Token +datasources.section.source-zoho-crm.start_datetime.title=Start Date +datasources.section.source-zoho-crm.client_id.description=OAuth2.0 Client ID +datasources.section.source-zoho-crm.client_secret.description=OAuth2.0 Client Secret +datasources.section.source-zoho-crm.dc_region.description=Please choose the region of your Data Center location. 
More info at this Link. +datasources.section.source-zoho-crm.edition.description=Choose your Edition of Zoho CRM to determine API Concurrency Limits +datasources.section.source-zoho-crm.environment.description=Please choose the environment +datasources.section.source-zoho-crm.refresh_token.description=OAuth2.0 Refresh Token +datasources.section.source-zoho-crm.start_datetime.description=ISO 8601, for instance: `YYYY-MM-DD`, `YYYY-MM-DD HH:MM:SS+HH:MM` +datasources.section.source-zoom-singer.jwt.title=JWT Token +datasources.section.source-zoom-singer.jwt.description=Zoom JWT Token. See the docs for more information on how to obtain this key. +datasources.section.source-zuora.client_id.title=Client ID +datasources.section.source-zuora.client_secret.title=Client Secret +datasources.section.source-zuora.data_query.title=Data Query Type +datasources.section.source-zuora.start_date.title=Start Date +datasources.section.source-zuora.tenant_endpoint.title=Tenant Endpoint Location +datasources.section.source-zuora.window_in_days.title=Query Window (in days) +datasources.section.source-zuora.client_id.description=Your OAuth user Client ID +datasources.section.source-zuora.client_secret.description=Your OAuth user Client Secret +datasources.section.source-zuora.data_query.description=Choose between `Live` or `Unlimited` - the optimized, replicated database at 12 hours freshness for high volume extraction. See this Link for details. +datasources.section.source-zuora.start_date.description=Start Date in format: YYYY-MM-DD +datasources.section.source-zuora.tenant_endpoint.description=Please choose the right endpoint where your Tenant is located. More info at this Link. +datasources.section.source-zuora.window_in_days.description=The number of days in each data chunk, beginning from start_date. The bigger the value, the faster the fetch (0.1 is roughly a couple of hours, 1 is a day, 364 is a year). +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If it does not exist, it will be created automatically. May be left empty, in which case a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
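+# Example (hypothetical account id and role name) for the destination-aws-datalake role_arn field described above:
+#   role_arn: arn:aws:iam::123456789012:role/datalake-writer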
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether those records should be removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether those records should be removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=The number of nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s). +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id used to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The key ID used when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server. +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server. +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys. 
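The upsert option described for destination-elasticsearch above (the primary key value reused as the document id) can be pictured with a small sketch. This is not the connector's code; the endpoint, index name, and record are made up for illustration, and a single-field primary key is assumed.

```
# Hypothetical sketch of "upsert by primary key": indexing a record under an
# explicit document id means a later sync with the same id overwrites it.
import requests

endpoint = "https://elasticsearch.example.com:9200"  # hypothetical Server Endpoint
index = "airbyte_users"                              # hypothetical index name
record = {"id": 42, "email": "jane@example.com"}     # hypothetical source record

# PUT /<index>/_doc/<id> — re-sending the same id replaces the stored document.
response = requests.put(f"{endpoint}/{index}/_doc/{record['id']}", json=record)
response.raise_for_status()
```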
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=GCS Bucket Path string Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted url. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
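Several destinations above (ClickHouse, and the generic JDBC destination here) accept a jdbc_url or jdbc_url_params value built from 'key=value' pairs joined by '&'. A minimal sketch of assembling such a URL, assuming hypothetical host, database, and parameter names:

```
# Hypothetical sketch of building a JDBC URL with extra 'key=value' parameters.
from urllib.parse import urlencode

host, port, database = "db.example.com", 5432, "warehouse"  # hypothetical connection details
jdbc_url_params = {"ssl": "true", "connectTimeout": "30"}    # extra driver properties

# Join the extra properties with '&' and append them as a query string.
jdbc_url = f"jdbc:postgresql://{host}:{port}/{database}?{urlencode(jdbc_url_params)}"
print(jdbc_url)  # jdbc:postgresql://db.example.com:5432/warehouse?ssl=true&connectTimeout=30
```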
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member seperated by comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
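The Replica Set instance type above takes server_addresses as comma-separated host:port members. A sketch of turning that value into a standard MongoDB connection string; the hosts and replica set name below are hypothetical:

```
# Hypothetical sketch of the comma-separated host:port convention for a replica set.
server_addresses = "mongo-1.example.com:27017,mongo-2.example.com:27017"  # hypothetical members
replica_set = "rs0"                                                       # hypothetical replica set name

# Standard MongoDB connection-string form for a replica set.
connection_uri = f"mongodb://{server_addresses}/?replicaSet={replica_set}"
print(connection_uri)
```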
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
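Both the Kafka and MQTT destinations above describe a topic_pattern in which '{namespace}' and '{stream}' are substituted and the result is normalized to a standard naming convention. A rough sketch of that substitution; the sanitising rule here is an assumption for illustration, not the connectors' exact behaviour:

```
# Hypothetical sketch of '{namespace}'/'{stream}' substitution in a topic pattern.
import re

def resolve_topic(pattern: str, namespace: str, stream: str) -> str:
    topic = pattern.replace("{namespace}", namespace).replace("{stream}", stream)
    # Assumed normalization: keep alphanumerics, '.', '_' and '-', replace anything else.
    return re.sub(r"[^A-Za-z0-9._-]", "_", topic)

print(resolve_topic("airbyte.{namespace}.{stream}", "public", "users"))  # airbyte.public.users
```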
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer. +datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages. +datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions. +datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name which can be accessed with. +datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar. +datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms). +datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces. +datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters. +datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-pulsar.topic_type.description=It identifies type of topic. Pulsar supports two kind of topics: persistent and non-persistent. In persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas non-persistent topic does not persist message into storage disk. +datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-rabbitmq.exchange.description=The exchange name. +datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name. +datasources.section.destination-rabbitmq.password.description=The password to connect. +datasources.section.destination-rabbitmq.port.description=The RabbitMQ port. +datasources.section.destination-rabbitmq.routing_key.description=The routing key. +datasources.section.destination-rabbitmq.ssl.description=SSL enabled. +datasources.section.destination-rabbitmq.username.description=The username to connect. +datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name. +datasources.section.destination-redis.cache_type.title=Cache type +datasources.section.destination-redis.host.title=Host +datasources.section.destination-redis.password.title=Password +datasources.section.destination-redis.port.title=Port +datasources.section.destination-redis.username.title=Username +datasources.section.destination-redis.cache_type.description=Redis cache type to store data in. +datasources.section.destination-redis.host.description=Redis host to connect to. +datasources.section.destination-redis.password.description=Password associated with Redis. +datasources.section.destination-redis.port.description=Port of Redis. 
+datasources.section.destination-redis.username.description=Username associated with Redis. +datasources.section.destination-redshift.database.title=Database +datasources.section.destination-redshift.host.title=Host +datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-redshift.password.title=Password +datasources.section.destination-redshift.port.title=Port +datasources.section.destination-redshift.schema.title=Default Schema +datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key +datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging +datasources.section.destination-redshift.uploading_method.title=Uploading Method +datasources.section.destination-redshift.username.title=Username +datasources.section.destination-redshift.database.description=Name of the database. +datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com) +datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-redshift.password.description=Password associated with the username. +datasources.section.destination-redshift.port.description=Port of the database. +datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-redshift.uploading_method.description=The method how the data will be uploaded to the database. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key. 
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-rockset.api_key.title=Api Key +datasources.section.destination-rockset.api_server.title=Api Server +datasources.section.destination-rockset.workspace.title=Workspace +datasources.section.destination-rockset.api_key.description=Rockset api key +datasources.section.destination-rockset.api_server.description=Rockset api URL +datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created + written to. 
+datasources.section.destination-s3.access_key_id.title=S3 Key ID * +datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec * +datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage 
+datasources.section.destination-s3.format.title=Output Format * +datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional) +datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional) +datasources.section.destination-s3.secret_access_key.title=S3 Access Key * +datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here. +datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-s3.format.description=Format of the data output. See here for more details +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. 
This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here. +datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here +datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes. +datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here +datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here +datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here +datasources.section.destination-scylla.address.title=Address +datasources.section.destination-scylla.keyspace.title=Keyspace +datasources.section.destination-scylla.password.title=Password +datasources.section.destination-scylla.port.title=Port +datasources.section.destination-scylla.replication.title=Replication factor +datasources.section.destination-scylla.username.title=Username +datasources.section.destination-scylla.address.description=Address to connect to. +datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in. +datasources.section.destination-scylla.password.description=Password associated with Scylla. +datasources.section.destination-scylla.port.description=Port of Scylla. +datasources.section.destination-scylla.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-scylla.username.description=Username to use to access Scylla. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys. 
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=GCS Bucket Path string Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted url. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the Microsoft native endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
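Read together, the destination-bigquery keys above describe a nested configuration: a few top-level fields plus a loading_method choice between Standard Inserts and GCS Staging, the latter carrying the GCS bucket and HMAC credential fields. A purely hypothetical illustration of that shape (placeholder values only; any discriminator field the real spec uses for the oneOf branches is omitted because it does not appear in these keys):

```
# Hypothetical illustration only: field names follow the keys listed above,
# values are placeholders, and the oneOf discriminator is intentionally omitted.
bigquery_destination_config = {
    "project_id": "my-gcp-project",
    "dataset_id": "airbyte_raw",
    "dataset_location": "US",
    "credentials_json": "<service account key JSON>",
    "loading_method": {                      # the "GCS Staging" oneOf branch
        "gcs_bucket_name": "my-staging-bucket",
        "gcs_bucket_path": "airbyte/staging",
        "keep_files_in_gcs-bucket": "Delete all tmp files from GCS",
        "credential": {                      # the "HMAC key" oneOf branch
            "hmac_key_access_id": "<61-character service-account HMAC ID>",
            "hmac_key_secret": "<40-character base64-encoded secret>",
        },
    },
}
```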
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to be used when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses, the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow the connector to guess the keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for the current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
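+# Hand-added illustrative note (not generated from a connector spec): the MeiliSearch host is the full
+# base URL of the instance, e.g. http://localhost:7700 (assuming the default MeiliSearch port of 7700).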
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see the documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by commas. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
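+# Hand-added illustrative note (not generated from a connector spec): replica set server addresses are a
+# comma-separated list of host:port entries, e.g. mongo1:27017,mongo2:27017,mongo3:27017 (27017 being the
+# standard MongoDB port).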
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
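+# Hand-added illustrative note (not generated from a connector spec): in the topic_pattern settings above
+# (and in the Kafka/Pulsar equivalents), '{namespace}' and '{stream}' are substituted per record, so a
+# pattern such as airbyte.{namespace}.{stream} would resolve to something like airbyte.public.users before
+# the destination applies its standard topic-name normalization.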
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not add it, the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not add it, the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=Whether the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer. +datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages. +datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions. +datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name which can be accessed with. +datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar. +datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms). +datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces. +datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and are spread across clusters. +datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk. +datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-rabbitmq.exchange.description=The exchange name. +datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name. +datasources.section.destination-rabbitmq.password.description=The password to connect. +datasources.section.destination-rabbitmq.port.description=The RabbitMQ port. +datasources.section.destination-rabbitmq.routing_key.description=The routing key. +datasources.section.destination-rabbitmq.ssl.description=SSL enabled. +datasources.section.destination-rabbitmq.username.description=The username to connect. +datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name. +datasources.section.destination-redis.cache_type.title=Cache type +datasources.section.destination-redis.host.title=Host +datasources.section.destination-redis.password.title=Password +datasources.section.destination-redis.port.title=Port +datasources.section.destination-redis.username.title=Username +datasources.section.destination-redis.cache_type.description=Redis cache type to store data in. +datasources.section.destination-redis.host.description=Redis host to connect to. +datasources.section.destination-redis.password.description=Password associated with Redis. +datasources.section.destination-redis.port.description=Port of Redis. 
+datasources.section.destination-redis.username.description=Username associated with Redis. +datasources.section.destination-redshift.database.title=Database +datasources.section.destination-redshift.host.title=Host +datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-redshift.password.title=Password +datasources.section.destination-redshift.port.title=Port +datasources.section.destination-redshift.schema.title=Default Schema +datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key +datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging +datasources.section.destination-redshift.uploading_method.title=Uploading Method +datasources.section.destination-redshift.username.title=Username +datasources.section.destination-redshift.database.description=Name of the database. +datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com) +datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-redshift.password.description=Password associated with the username. +datasources.section.destination-redshift.port.description=Port of the database. +datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-redshift.uploading_method.description=The method by which the data will be uploaded to the database. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key. 
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key ID. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-rockset.api_key.title=API Key +datasources.section.destination-rockset.api_server.title=API Server +datasources.section.destination-rockset.workspace.title=Workspace +datasources.section.destination-rockset.api_key.description=Rockset API key. +datasources.section.destination-rockset.api_server.description=Rockset API URL. +datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to. 
+datasources.section.destination-s3.access_key_id.title=S3 Key ID * +datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec * +datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage 
+datasources.section.destination-s3.format.title=Output Format * +datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional) +datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional) +datasources.section.destination-s3.secret_access_key.title=S3 Access Key * +datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here. +datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-s3.format.description=Format of the data output. See here for more details +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. 
This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here. +datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here +datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes. +datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint URL. Read more here +datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here +datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here +datasources.section.destination-scylla.address.title=Address +datasources.section.destination-scylla.keyspace.title=Keyspace +datasources.section.destination-scylla.password.title=Password +datasources.section.destination-scylla.port.title=Port +datasources.section.destination-scylla.replication.title=Replication factor +datasources.section.destination-scylla.username.title=Username +datasources.section.destination-scylla.address.description=Address to connect to. +datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in. +datasources.section.destination-scylla.password.description=Password associated with Scylla. +datasources.section.destination-scylla.port.description=Port of Scylla. +datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-scylla.username.description=Username to use to access Scylla. +datasources.section.destination-sftp-json.destination_path.title=Destination path +datasources.section.destination-sftp-json.host.title=Host +datasources.section.destination-sftp-json.password.title=Password +datasources.section.destination-sftp-json.port.title=Port +datasources.section.destination-sftp-json.username.title=User +datasources.section.destination-sftp-json.destination_path.description=Path to the directory where JSON files will be written. +datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server. +datasources.section.destination-sftp-json.password.description=Password associated with the username. +datasources.section.destination-sftp-json.port.description=Port of the SFTP server. +datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server. 
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional) +datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password +datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password +datasources.section.destination-snowflake.credentials.title=Authorization Method +datasources.section.destination-snowflake.database.title=Database +datasources.section.destination-snowflake.host.title=Host +datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging +datasources.section.destination-snowflake.loading_method.title=Data Staging Method +datasources.section.destination-snowflake.role.title=Role +datasources.section.destination-snowflake.schema.title=Default Schema +datasources.section.destination-snowflake.username.title=Username +datasources.section.destination-snowflake.warehouse.title=Warehouse +datasources.section.destination-snowflake.credentials.description= +datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA private key to use for the Snowflake connection. See the docs for more information on how to obtain this key. +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username. +datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into +datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format ...snowflakecomputing.com) +datasources.section.destination-snowflake.jdbc_url_params.description=Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &). 
Example: key1=value1&key2=value2&key3=value3 +datasources.section.destination-snowflake.loading_method.description=Select a data staging method +datasources.section.destination-snowflake.loading_method.oneOf.0.description=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.description=Enter your AWS access key ID. Airbyte requires Read and Write permissions on your S3 bucket +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.description=Choose a data encryption method for the staging data +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.description=Toggle to delete staging files from the S3 bucket after a successful sync +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.description=Enter your S3 bucket name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.description=Enter the region where your S3 bucket resides +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.description=Enter your AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.3.description=Recommended for large production workloads for better speed and scalability. 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.description=Enter the Cloud Storage bucket name +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.description=Enter your Google Cloud service account key in the JSON format with read/write access to your Cloud Storage staging bucket +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.description=Enter the Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.4.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.description=Enter your Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.description=Enter your Azure Blob Storage container name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.description=Enter the Azure Blob Storage endpoint domain name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.description=Enter the Shared access signature (SAS) token to grant Snowflake limited access to objects in your Azure Blob Storage account +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.role.description=Enter the role that you want to use to access Snowflake +datasources.section.destination-snowflake.schema.description=Enter the name of the default schema +datasources.section.destination-snowflake.username.description=Enter the name of the user you want to use to access the database +datasources.section.destination-snowflake.warehouse.description=Enter the name of the warehouse that you want to sync data into +datasources.section.destination-sqlite.destination_path.description=Path to the sqlite.db file. The file will be placed inside that local mount. For more information check out our docs +datasources.section.destination-tidb.database.title=Database +datasources.section.destination-tidb.host.title=Host +datasources.section.destination-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-tidb.password.title=Password +datasources.section.destination-tidb.port.title=Port +datasources.section.destination-tidb.ssl.title=SSL Connection +datasources.section.destination-tidb.username.title=User +datasources.section.destination-tidb.database.description=Name of the database. +datasources.section.destination-tidb.host.description=Hostname of the database. +datasources.section.destination-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-tidb.password.description=Password associated with the username. +datasources.section.destination-tidb.port.description=Port of the database. 
+datasources.section.destination-tidb.ssl.description=Encrypt data using SSL. +datasources.section.destination-tidb.username.description=Username to use to access the database. +datasources.section.source-airtable.api_key.title=API Key +datasources.section.source-airtable.base_id.title=Base ID +datasources.section.source-airtable.tables.title=Tables +datasources.section.source-airtable.api_key.description=The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key. +datasources.section.source-airtable.base_id.description=The Base ID to integrate the data from. You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs. +datasources.section.source-airtable.tables.description=The list of Tables to integrate. +datasources.section.source-amazon-ads.auth_type.title=Auth Type +datasources.section.source-amazon-ads.client_id.title=Client ID +datasources.section.source-amazon-ads.client_secret.title=Client Secret +datasources.section.source-amazon-ads.profiles.title=Profile IDs (Optional) +datasources.section.source-amazon-ads.refresh_token.title=Refresh Token +datasources.section.source-amazon-ads.region.title=Region * +datasources.section.source-amazon-ads.report_generation_max_retries.title=Report Generation Maximum Retries * +datasources.section.source-amazon-ads.report_wait_timeout.title=Report Wait Timeout * +datasources.section.source-amazon-ads.start_date.title=Start Date (Optional) +datasources.section.source-amazon-ads.client_id.description=The client ID of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.client_secret.description=The client secret of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.profiles.description=Profile IDs you want to fetch data for. See docs for more details. +datasources.section.source-amazon-ads.refresh_token.description=Amazon Ads refresh token. See the docs for more information on how to obtain this token. +datasources.section.source-amazon-ads.region.description=Region to pull data from (EU/NA/FE). See docs for more details. +datasources.section.source-amazon-ads.report_generation_max_retries.description=Maximum retries Airbyte will attempt for fetching report data. Default is 5. +datasources.section.source-amazon-ads.report_wait_timeout.description=Timeout duration in minutes for Reports. Default is 30 minutes. +datasources.section.source-amazon-ads.start_date.description=The Start date for collecting reports, should not be more than 60 days in the past. 
In YYYY-MM-DD format +datasources.section.source-amazon-seller-partner.app_id.title=App Id * +datasources.section.source-amazon-seller-partner.auth_type.title=Auth Type +datasources.section.source-amazon-seller-partner.aws_access_key.title=AWS Access Key +datasources.section.source-amazon-seller-partner.aws_environment.title=AWSEnvironment +datasources.section.source-amazon-seller-partner.aws_secret_key.title=AWS Secret Access Key +datasources.section.source-amazon-seller-partner.lwa_app_id.title=LWA Client Id +datasources.section.source-amazon-seller-partner.lwa_client_secret.title=LWA Client Secret +datasources.section.source-amazon-seller-partner.max_wait_seconds.title=Max wait time for reports (in seconds) +datasources.section.source-amazon-seller-partner.period_in_days.title=Period In Days +datasources.section.source-amazon-seller-partner.refresh_token.title=Refresh Token +datasources.section.source-amazon-seller-partner.region.title=AWSRegion +datasources.section.source-amazon-seller-partner.replication_end_date.title=End Date +datasources.section.source-amazon-seller-partner.replication_start_date.title=Start Date +datasources.section.source-amazon-seller-partner.report_options.title=Report Options +datasources.section.source-amazon-seller-partner.role_arn.title=Role ARN +datasources.section.source-amazon-seller-partner.app_id.description=Your Amazon App ID +datasources.section.source-amazon-seller-partner.aws_access_key.description=Specifies the AWS access key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.aws_environment.description=An enumeration. +datasources.section.source-amazon-seller-partner.aws_secret_key.description=Specifies the AWS secret key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.lwa_app_id.description=Your Login with Amazon Client ID. +datasources.section.source-amazon-seller-partner.lwa_client_secret.description=Your Login with Amazon Client Secret. +datasources.section.source-amazon-seller-partner.max_wait_seconds.description=Sometimes report can take up to 30 minutes to generate. This will set the limit for how long to wait for a successful report. +datasources.section.source-amazon-seller-partner.period_in_days.description=Will be used for stream slicing for initial full_refresh sync when no updated state is present for reports that support sliced incremental sync. +datasources.section.source-amazon-seller-partner.refresh_token.description=The Refresh Token obtained via OAuth flow authorization. +datasources.section.source-amazon-seller-partner.region.description=An enumeration. +datasources.section.source-amazon-seller-partner.replication_end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. +datasources.section.source-amazon-seller-partner.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-amazon-seller-partner.report_options.description=Additional information passed to reports. This varies by report type. Must be a valid json string. +datasources.section.source-amazon-seller-partner.role_arn.description=Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations requested using this profile. (Needs permission to 'Assume Role' STS). 
+datasources.section.source-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.source-amazon-sqs.attributes_to_return.title=Message Attributes To Return +datasources.section.source-amazon-sqs.delete_messages.title=Delete Messages After Read +datasources.section.source-amazon-sqs.max_batch_size.title=Max Batch Size +datasources.section.source-amazon-sqs.max_wait_time.title=Max Wait Time +datasources.section.source-amazon-sqs.queue_url.title=Queue URL +datasources.section.source-amazon-sqs.region.title=AWS Region +datasources.section.source-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.source-amazon-sqs.visibility_timeout.title=Message Visibility Timeout +datasources.section.source-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.attributes_to_return.description=Comma-separated list of Message Attribute names to return +datasources.section.source-amazon-sqs.delete_messages.description=If Enabled, messages will be deleted from the SQS Queue after being read. If Disabled, messages are left in the queue and can be read more than once. WARNING: Enabling this option can result in data loss in cases of failure, use with caution, see documentation for more detail. +datasources.section.source-amazon-sqs.max_batch_size.description=Max amount of messages to get in one batch (10 max) +datasources.section.source-amazon-sqs.max_wait_time.description=Max amount of time in seconds to wait for messages in a single poll (20 max) +datasources.section.source-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.source-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.source-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.visibility_timeout.description=Modify the Visibility Timeout of the individual message from the Queue's default (seconds). +datasources.section.source-amplitude.api_key.title=API Key +datasources.section.source-amplitude.secret_key.title=Secret Key +datasources.section.source-amplitude.start_date.title=Replication Start Date +datasources.section.source-amplitude.api_key.description=Amplitude API Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.secret_key.description=Amplitude Secret Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-apify-dataset.clean.title=Clean +datasources.section.source-apify-dataset.datasetId.title=Dataset ID +datasources.section.source-apify-dataset.clean.description=If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false. +datasources.section.source-apify-dataset.datasetId.description=ID of the dataset you would like to load to Airbyte. +datasources.section.source-appsflyer.api_token.description=Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard. +datasources.section.source-appsflyer.app_id.description=App identifier as found in AppsFlyer.
+datasources.section.source-appsflyer.start_date.description=The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days. +datasources.section.source-appsflyer.timezone.description=Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console. +datasources.section.source-appstore-singer.issuer_id.title=Issuer ID +datasources.section.source-appstore-singer.key_id.title=Key ID +datasources.section.source-appstore-singer.private_key.title=Private Key +datasources.section.source-appstore-singer.start_date.title=Start Date +datasources.section.source-appstore-singer.vendor.title=Vendor ID +datasources.section.source-appstore-singer.issuer_id.description=Appstore Issuer ID. See the docs for more information on how to obtain this ID. +datasources.section.source-appstore-singer.key_id.description=Appstore Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.private_key.description=Appstore Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-appstore-singer.vendor.description=Appstore Vendor ID. See the docs for more information on how to obtain this ID. +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.title=Personal Access Token +datasources.section.source-asana.credentials.oneOf.0.title=Authenticate with Personal Access Token +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title= +datasources.section.source-asana.credentials.oneOf.1.title=Authenticate via Asana (OAuth) +datasources.section.source-asana.credentials.title=Authentication mechanism +datasources.section.source-asana.credentials.description=Choose how to authenticate to Asana +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.description=PAT Credentials +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.description=Asana Personal Access Token (generate yours here).
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.description=OAuth Credentials +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description= +datasources.section.source-aws-cloudtrail.aws_key_id.title=Key ID +datasources.section.source-aws-cloudtrail.aws_region_name.title=Region Name +datasources.section.source-aws-cloudtrail.aws_secret_key.title=Secret Key +datasources.section.source-aws-cloudtrail.start_date.title=Start Date +datasources.section.source-aws-cloudtrail.aws_key_id.description=AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.aws_region_name.description=The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name. +datasources.section.source-aws-cloudtrail.aws_secret_key.description=AWS CloudTrail Secret Key. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.start_date.description=The date you would like to replicate data. Data in AWS CloudTrail is available for the last 90 days only. Format: YYYY-MM-DD. +datasources.section.source-azure-table.storage_access_key.title=Access Key +datasources.section.source-azure-table.storage_account_name.title=Account Name +datasources.section.source-azure-table.storage_endpoint_suffix.title=Endpoint Suffix +datasources.section.source-azure-table.storage_access_key.description=Azure Table Storage Access Key. See the docs for more information on how to obtain this key. +datasources.section.source-azure-table.storage_account_name.description=The name of your storage account. +datasources.section.source-azure-table.storage_endpoint_suffix.description=Azure Table Storage service account URL suffix. See the docs for more information on how to obtain the endpoint suffix +datasources.section.source-bamboo-hr.api_key.description=API key of Bamboo HR +datasources.section.source-bamboo-hr.custom_reports_fields.description=Comma-separated list of fields to include in custom reports. +datasources.section.source-bamboo-hr.custom_reports_include_default_fields.description=If true, the custom reports endpoint will include the default fields defined here: https://documentation.bamboohr.com/docs/list-of-field-names. +datasources.section.source-bamboo-hr.subdomain.description=Subdomain of Bamboo HR +datasources.section.source-bigcommerce.access_token.title=Access Token +datasources.section.source-bigcommerce.start_date.title=Start Date +datasources.section.source-bigcommerce.store_hash.title=Store Hash +datasources.section.source-bigcommerce.access_token.description=Access Token for making authenticated requests. +datasources.section.source-bigcommerce.start_date.description=The date you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-bigcommerce.store_hash.description=The hash code of the store. For https://api.bigcommerce.com/stores/HASH_CODE/v3/, the store's hash code is 'HASH_CODE'.
+datasources.section.source-bigquery.credentials_json.title=Credentials JSON +datasources.section.source-bigquery.dataset_id.title=Default Dataset ID +datasources.section.source-bigquery.project_id.title=Project ID +datasources.section.source-bigquery.credentials_json.description=The contents of your Service Account Key JSON file. See the docs for more information on how to obtain this key. +datasources.section.source-bigquery.dataset_id.description=The dataset ID to search for tables and views. If you are only loading data from one dataset, setting this option could result in much faster schema discovery. +datasources.section.source-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. +datasources.section.source-bing-ads.client_id.title=Client ID +datasources.section.source-bing-ads.client_secret.title=Client Secret +datasources.section.source-bing-ads.developer_token.title=Developer Token +datasources.section.source-bing-ads.refresh_token.title=Refresh Token +datasources.section.source-bing-ads.reports_start_date.title=Reports replication start date +datasources.section.source-bing-ads.tenant_id.title=Tenant ID +datasources.section.source-bing-ads.client_id.description=The Client ID of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.client_secret.description=The Client Secret of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.developer_token.description=Developer token associated with user. See more info in the docs. +datasources.section.source-bing-ads.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-bing-ads.reports_start_date.description=The start date from which to begin replicating report data. Any data generated before this date will not be replicated in reports. This is a UTC date in YYYY-MM-DD format. +datasources.section.source-bing-ads.tenant_id.description=The Tenant ID of your Microsoft Advertising developer application. Set this to "common" unless you know you need a different value. +datasources.section.source-braintree.environment.title=Environment +datasources.section.source-braintree.merchant_id.title=Merchant ID +datasources.section.source-braintree.private_key.title=Private Key +datasources.section.source-braintree.public_key.title=Public Key +datasources.section.source-braintree.start_date.title=Start Date +datasources.section.source-braintree.environment.description=Environment specifies where the data will come from. +datasources.section.source-braintree.merchant_id.description=The unique identifier for your entire gateway account. See the docs for more information on how to obtain this ID. +datasources.section.source-braintree.private_key.description=Braintree Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.public_key.description=Braintree Public Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-cart.access_token.title=Access Token +datasources.section.source-cart.start_date.title=Start Date +datasources.section.source-cart.store_name.title=Store Name +datasources.section.source-cart.access_token.description=Access Token for making authenticated requests. 
+datasources.section.source-cart.start_date.description=The date from which you'd like to replicate the data +datasources.section.source-cart.store_name.description=The name of Cart.com Online Store. All API URLs start with https://[mystorename.com]/api/v1/, where [mystorename.com] is the domain name of your store. +datasources.section.source-chargebee.product_catalog.title=Product Catalog +datasources.section.source-chargebee.site.title=Site +datasources.section.source-chargebee.site_api_key.title=API Key +datasources.section.source-chargebee.start_date.title=Start Date +datasources.section.source-chargebee.product_catalog.description=Product Catalog version of your Chargebee site. Instructions on how to find your version you may find here under `API Version` section. +datasources.section.source-chargebee.site.description=The site prefix for your Chargebee instance. +datasources.section.source-chargebee.site_api_key.description=Chargebee API Key. See the docs for more information on how to obtain this key. +datasources.section.source-chargebee.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-chargify.api_key.description=Chargify API Key. +datasources.section.source-chargify.domain.description=Chargify domain. Normally this domain follows the following format companyname.chargify.com +datasources.section.source-chartmogul.api_key.description=Chartmogul API key +datasources.section.source-chartmogul.interval.description=Some APIs such as Metrics require intervals to cluster data. +datasources.section.source-chartmogul.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. When feasible, any data before this date will not be replicated. +datasources.section.source-clickhouse.database.title=Database +datasources.section.source-clickhouse.host.title=Host +datasources.section.source-clickhouse.password.title=Password +datasources.section.source-clickhouse.port.title=Port +datasources.section.source-clickhouse.ssl.title=SSL Connection +datasources.section.source-clickhouse.username.title=Username +datasources.section.source-clickhouse.database.description=The name of the database. +datasources.section.source-clickhouse.host.description=The host endpoint of the Clickhouse cluster. +datasources.section.source-clickhouse.password.description=The password associated with this username. +datasources.section.source-clickhouse.port.description=The port of the database. +datasources.section.source-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.source-clickhouse.username.description=The username which is used to access the database. +datasources.section.source-close-com.api_key.description=Close.com API key (usually starts with 'api_'; find yours here). +datasources.section.source-close-com.start_date.description=The start date to sync data. Leave blank for full sync. Format: YYYY-MM-DD. +datasources.section.source-cockroachdb.database.title=DB Name +datasources.section.source-cockroachdb.host.title=Host +datasources.section.source-cockroachdb.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-cockroachdb.password.title=Password +datasources.section.source-cockroachdb.port.title=Port +datasources.section.source-cockroachdb.ssl.title=Connect using SSL +datasources.section.source-cockroachdb.username.title=User +datasources.section.source-cockroachdb.database.description=Name of the database. 
+datasources.section.source-cockroachdb.host.description=Hostname of the database. +datasources.section.source-cockroachdb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (e.g. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-cockroachdb.password.description=Password associated with the username. +datasources.section.source-cockroachdb.port.description=Port of the database. +datasources.section.source-cockroachdb.ssl.description=Encrypt client/server communications for increased security. +datasources.section.source-cockroachdb.username.description=Username to use to access the database. +datasources.section.source-commercetools.client_id.description=ID of the API Client. +datasources.section.source-commercetools.client_secret.description=The password or secret of the API Client. +datasources.section.source-commercetools.host.description=The cloud provider where your shop is hosted. See: https://docs.commercetools.com/api/authorization +datasources.section.source-commercetools.project_key.description=The project key. +datasources.section.source-commercetools.region.description=The region of the platform. +datasources.section.source-commercetools.start_date.description=The date you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-confluence.api_token.description=Please follow the Confluence documentation for generating an API token: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/ +datasources.section.source-confluence.domain_name.description=Your Confluence domain name +datasources.section.source-confluence.email.description=Your Confluence login email +datasources.section.source-db2.encryption.oneOf.0.title=Unencrypted +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.title=Key Store Password. This field is optional. If you do not fill in this field, the password will be randomly generated. +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.title=SSL PEM file +datasources.section.source-db2.encryption.oneOf.1.title=TLS Encrypted (verify certificate) +datasources.section.source-db2.encryption.title=Encryption +datasources.section.source-db2.jdbc_url_params.title=JDBC URL Params +datasources.section.source-db2.db.description=Name of the database. +datasources.section.source-db2.encryption.description=Encryption method to use when communicating with the database +datasources.section.source-db2.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-db2.encryption.oneOf.1.description=Verify and use the certificate provided by the server. +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.description=Key Store Password +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations +datasources.section.source-db2.host.description=Host of the Db2 database. +datasources.section.source-db2.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-db2.password.description=Password associated with the username.
+datasources.section.source-db2.port.description=Port of the database. +datasources.section.source-db2.username.description=Username to use to access the database. +datasources.section.source-delighted.api_key.title=Delighted API Key +datasources.section.source-delighted.since.title=Since +datasources.section.source-delighted.api_key.description=A Delighted API key. +datasources.section.source-delighted.since.description=The date from which you'd like to replicate the data +datasources.section.source-dixa.api_token.description=Dixa API token +datasources.section.source-dixa.batch_size.description=Number of days to batch into one request. Max 31. +datasources.section.source-dixa.start_date.description=The connector pulls records updated from this date onwards. +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-drift.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.1.title=Access Token +datasources.section.source-drift.credentials.title=Authorization Method +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.description=The Client ID of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.description=Drift Access Token. See the docs for more information on how to generate this key. 
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.source-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.source-elasticsearch.endpoint.title=Server Endpoint +datasources.section.source-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to be used when accessing an enterprise Elasticsearch instance. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.source-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.source-facebook-marketing.access_token.title=Access Token +datasources.section.source-facebook-marketing.account_id.title=Account ID +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.title=ValidActionBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.title=Action Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.title=ValidBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.title=Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.title=End Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.title=ValidEnums +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.title=Fields +datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.title=Custom Insights Lookback Window +datasources.section.source-facebook-marketing.custom_insights.items.properties.name.title=Name +datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.title=Start Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.title=Time Increment
+datasources.section.source-facebook-marketing.custom_insights.items.title=InsightConfig +datasources.section.source-facebook-marketing.custom_insights.title=Custom Insights +datasources.section.source-facebook-marketing.end_date.title=End Date +datasources.section.source-facebook-marketing.fetch_thumbnail_images.title=Fetch Thumbnail Images +datasources.section.source-facebook-marketing.include_deleted.title=Include Deleted +datasources.section.source-facebook-marketing.insights_lookback_window.title=Insights Lookback Window +datasources.section.source-facebook-marketing.max_batch_size.title=Maximum size of Batched Requests +datasources.section.source-facebook-marketing.page_size.title=Page Size of Requests +datasources.section.source-facebook-marketing.start_date.title=Start Date +datasources.section.source-facebook-marketing.access_token.description=The value of the access token generated. See the docs for more information +datasources.section.source-facebook-marketing.account_id.description=The Facebook Ad account ID to use when pulling data from the Facebook Marketing API. +datasources.section.source-facebook-marketing.custom_insights.description=A list which contains insights entries; each entry must have a name and can contain fields, breakdowns or action_breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.description=Config for custom insights +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.description=A list of chosen action_breakdowns for action_breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.description=A list of chosen breakdowns for breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.description=The date until which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. All data generated between the start date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.description=A list of chosen fields for the fields parameter +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.custom_insights.items.properties.name.description=The name value of the insight +datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.description=The date from which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.description=Time window in days by which to aggregate statistics. The sync will be chunked into N day intervals, where N is the number of days you specified. For example, if you set this value to 7, then all statistics will be reported as 7-day aggregates by starting from the start_date.
If the start and end dates are October 1st and October 30th, then the connector will output 5 records: 01 - 06, 07 - 13, 14 - 20, 21 - 27, and 28 - 30 (3 days only). +datasources.section.source-facebook-marketing.end_date.description=The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.fetch_thumbnail_images.description=In each Ad Creative, fetch the thumbnail_url and store the result in thumbnail_data_url +datasources.section.source-facebook-marketing.include_deleted.description=Include data from deleted Campaigns, Ads, and AdSets +datasources.section.source-facebook-marketing.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.max_batch_size.description=Maximum batch size used when sending batch requests to Facebook API. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.page_size.description=Page size used when sending requests to Facebook API to specify number of records per page when response has pagination. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.start_date.description=The date from which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-facebook-pages.access_token.title=Page Access Token +datasources.section.source-facebook-pages.page_id.title=Page ID +datasources.section.source-facebook-pages.access_token.description=Facebook Page Access Token +datasources.section.source-facebook-pages.page_id.description=Page ID +datasources.section.source-faker.count.title=Count +datasources.section.source-faker.records_per_slice.title=Records Per Stream Slice +datasources.section.source-faker.records_per_sync.title=Records Per Sync +datasources.section.source-faker.seed.title=Seed +datasources.section.source-faker.count.description=How many users should be generated in total. This setting does not apply to the purchases or products stream. +datasources.section.source-faker.records_per_slice.description=How many fake records will be in each page (stream slice), before a state message is emitted? +datasources.section.source-faker.records_per_sync.description=How many fake records will be returned for each sync, for each stream? By default, it will take 2 syncs to create the requested 1000 records. 
+datasources.section.source-faker.seed.description=Manually control the faker random seed to return the same values on subsequent runs (leave -1 for random) +datasources.section.source-file.dataset_name.title=Dataset Name +datasources.section.source-file.format.title=File Format +datasources.section.source-file.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.4.properties.user.title=User +datasources.section.source-file.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.5.properties.user.title=User +datasources.section.source-file.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.6.properties.user.title=User +datasources.section.source-file.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file.provider.oneOf.7.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.7.title=Local Filesystem (limited) +datasources.section.source-file.provider.title=Storage Provider +datasources.section.source-file.reader_options.title=Reader Options +datasources.section.source-file.url.title=URL +datasources.section.source-file.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details. 
+datasources.section.source-file.provider.oneOf.4.properties.host.description= +datasources.section.source-file.provider.oneOf.4.properties.host.description= +datasources.section.source-file.provider.oneOf.4.properties.password.description= +datasources.section.source-file.provider.oneOf.4.properties.password.description= +datasources.section.source-file.provider.oneOf.4.properties.port.description= +datasources.section.source-file.provider.oneOf.4.properties.port.description= +datasources.section.source-file.provider.oneOf.4.properties.user.description= +datasources.section.source-file.provider.oneOf.4.properties.user.description= +datasources.section.source-file.provider.oneOf.5.properties.host.description= +datasources.section.source-file.provider.oneOf.5.properties.host.description= +datasources.section.source-file.provider.oneOf.5.properties.password.description= +datasources.section.source-file.provider.oneOf.5.properties.password.description= +datasources.section.source-file.provider.oneOf.5.properties.port.description= +datasources.section.source-file.provider.oneOf.5.properties.port.description= +datasources.section.source-file.provider.oneOf.5.properties.user.description= +datasources.section.source-file.provider.oneOf.5.properties.user.description= +datasources.section.source-file.provider.oneOf.6.properties.host.description= +datasources.section.source-file.provider.oneOf.6.properties.host.description= +datasources.section.source-file.provider.oneOf.6.properties.password.description= +datasources.section.source-file.provider.oneOf.6.properties.password.description= +datasources.section.source-file.provider.oneOf.6.properties.port.description= +datasources.section.source-file.provider.oneOf.6.properties.port.description= +datasources.section.source-file.provider.oneOf.6.properties.user.description= +datasources.section.source-file.provider.oneOf.6.properties.user.description= +datasources.section.source-file.provider.oneOf.7.properties.storage.description=WARNING: Note that the local storage URL available for reading must start with the local mount "/local/" at the moment until we implement more advanced docker mounting options. +datasources.section.source-file.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior. +datasources.section.source-file.url.description=The URL path to access the file which should be replicated. 
+datasources.section.source-file-secure.dataset_name.title=Dataset Name +datasources.section.source-file-secure.format.title=File Format +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file-secure.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file-secure.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file-secure.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file-secure.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file-secure.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file-secure.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.5.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file-secure.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.6.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file-secure.provider.title=Storage Provider +datasources.section.source-file-secure.reader_options.title=Reader Options +datasources.section.source-file-secure.url.title=URL +datasources.section.source-file-secure.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file-secure.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file-secure.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details. 
+datasources.section.source-file-secure.provider.oneOf.4.properties.host.description= +datasources.section.source-file-secure.provider.oneOf.4.properties.password.description= +datasources.section.source-file-secure.provider.oneOf.4.properties.port.description= +datasources.section.source-file-secure.provider.oneOf.4.properties.user.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.host.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.password.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.port.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.user.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.host.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.password.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.port.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.user.description= +datasources.section.source-file-secure.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior. +datasources.section.source-file-secure.url.description=The URL path to access the file which should be replicated. +datasources.section.source-firebolt.account.title=Account +datasources.section.source-firebolt.database.title=Database +datasources.section.source-firebolt.engine.title=Engine +datasources.section.source-firebolt.host.title=Host +datasources.section.source-firebolt.password.title=Password +datasources.section.source-firebolt.username.title=Username +datasources.section.source-firebolt.account.description=Firebolt account to log in to. +datasources.section.source-firebolt.database.description=The database to connect to. +datasources.section.source-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.source-firebolt.host.description=The host name of your Firebolt database. +datasources.section.source-firebolt.password.description=Firebolt password. +datasources.section.source-firebolt.username.description=Firebolt email address you use to log in. 
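The `reader_options` description above says the value must be a string in JSON format. A minimal sketch, using the same jq tooling as the rest of this repo, for sanity-checking a candidate value before pasting it into the connector form; the option names shown are hypothetical examples, not a documented schema:

```
# Illustrative only: verify that a candidate reader_options value parses as a
# JSON object. The keys below are made-up examples, not a documented schema.
READER_OPTIONS='{"sep": ";", "header": 0}'
if echo "$READER_OPTIONS" | jq -e 'type == "object"' > /dev/null; then
  echo "reader_options parses as a JSON object"
else
  echo "reader_options is not valid JSON" >&2
fi
```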
+datasources.section.source-flexport.api_key.title=API Key +datasources.section.source-flexport.start_date.title=Start Date +datasources.section.source-freshcaller.api_key.title=API Key +datasources.section.source-freshcaller.domain.title=Domain for Freshcaller account +datasources.section.source-freshcaller.requests_per_minute.title=Requests per minute +datasources.section.source-freshcaller.start_date.title=Start Date +datasources.section.source-freshcaller.sync_lag_minutes.title=Lag in minutes for each sync +datasources.section.source-freshcaller.api_key.description=Freshcaller API Key. See the docs for more information on how to obtain this key. +datasources.section.source-freshcaller.domain.description=Used to construct the Base URL for the Freshcaller APIs +datasources.section.source-freshcaller.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account. +datasources.section.source-freshcaller.start_date.description=UTC date and time. Any data created after this date will be replicated. +datasources.section.source-freshcaller.sync_lag_minutes.description=Lag in minutes for each sync, i.e., at time T, data for the time range [prev_sync_time, T-30] will be fetched +datasources.section.source-freshdesk.api_key.title=API Key +datasources.section.source-freshdesk.domain.title=Domain +datasources.section.source-freshdesk.requests_per_minute.title=Requests per minute +datasources.section.source-freshdesk.start_date.title=Start Date +datasources.section.source-freshdesk.api_key.description=Freshdesk API Key. See the docs for more information on how to obtain this key. +datasources.section.source-freshdesk.domain.description=Freshdesk domain +datasources.section.source-freshdesk.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account. +datasources.section.source-freshdesk.start_date.description=UTC date and time. Any data created after this date will be replicated. If this parameter is not set, all data will be replicated. +datasources.section.source-freshsales.api_key.title=API Key +datasources.section.source-freshsales.domain_name.title=Domain Name +datasources.section.source-freshsales.api_key.description=Freshsales API Key. See here. The key is case sensitive. +datasources.section.source-freshsales.domain_name.description=The Name of your Freshsales domain +datasources.section.source-freshservice.api_key.title=API Key +datasources.section.source-freshservice.domain_name.title=Domain Name +datasources.section.source-freshservice.start_date.title=Start Date +datasources.section.source-freshservice.api_key.description=Freshservice API Key. See here. The key is case sensitive. +datasources.section.source-freshservice.domain_name.description=The name of your Freshservice domain +datasources.section.source-freshservice.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated. 
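The `sync_lag_minutes` description above says that a sync running at time T fetches the range [prev_sync_time, T-30]. A minimal sketch of computing that upper bound for a hypothetical 30-minute lag; it assumes GNU date (on macOS the `-v-30M` flag would be needed instead):

```
# Sketch: compute the upper bound of the Freshcaller sync window for a
# hypothetical lag of 30 minutes. Assumes GNU date.
SYNC_LAG_MINUTES=30
date -u -d "$SYNC_LAG_MINUTES minutes ago" "+%Y-%m-%dT%H:%M:%SZ"
```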
+datasources.section.source-github.branch.title=Branch (Optional) +datasources.section.source-github.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-github.credentials.oneOf.0.title=OAuth +datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.title=Personal Access Tokens +datasources.section.source-github.credentials.oneOf.1.title=Personal Access Token +datasources.section.source-github.credentials.title=Authentication * +datasources.section.source-github.page_size_for_large_streams.title=Page size for large streams (Optional) +datasources.section.source-github.repository.title=GitHub Repositories +datasources.section.source-github.start_date.title=Start date +datasources.section.source-github.branch.description=Space-delimited list of GitHub repository branches to pull commits for, e.g. `airbytehq/airbyte/master`. If no branches are specified for a repository, the default branch will be pulled. +datasources.section.source-github.credentials.description=Choose how to authenticate to GitHub +datasources.section.source-github.credentials.oneOf.0.properties.access_token.description=OAuth access token +datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.description=Log into GitHub and then generate a personal access token. To load balance your API quota consumption across multiple API tokens, input multiple tokens separated with "," +datasources.section.source-github.page_size_for_large_streams.description=The GitHub connector contains several streams with a large amount of data. The page size of such streams depends on the size of your repository. We recommend that you specify values between 10 and 30. +datasources.section.source-github.repository.description=Space-delimited list of GitHub organizations/repositories, e.g. `airbytehq/airbyte` for a single repository, `airbytehq/*` to get all repositories from an organization and `airbytehq/airbyte airbytehq/another-repo` for multiple repositories. +datasources.section.source-github.start_date.description=The date from which you'd like to replicate data from GitHub in the format YYYY-MM-DDT00:00:00Z. For the streams which support this configuration, only data generated on or after the start date will be replicated. This field doesn't apply to all streams, see the docs for more info +datasources.section.source-gitlab.api_url.title=API URL +datasources.section.source-gitlab.groups.title=Groups +datasources.section.source-gitlab.private_token.title=Private Token +datasources.section.source-gitlab.projects.title=Projects +datasources.section.source-gitlab.start_date.title=Start Date +datasources.section.source-gitlab.api_url.description=Please enter the base URL of your GitLab instance. +datasources.section.source-gitlab.groups.description=Space-delimited list of groups. e.g. airbyte.io. +datasources.section.source-gitlab.private_token.description=Log into your GitLab account and then generate a personal Access Token. +datasources.section.source-gitlab.projects.description=Space-delimited list of projects. e.g. airbyte.io/documentation meltano/tap-gitlab. +datasources.section.source-gitlab.start_date.description=The date from which you'd like to replicate data for GitLab API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. 
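The GitHub `repository` and `branch` fields above take space-delimited lists. A minimal sketch of how such a value splits, reusing the repository names already quoted in the descriptions:

```
# Sketch: iterate a space-delimited repository list of the kind the
# GitHub `repository` field above expects.
REPOSITORIES="airbytehq/airbyte airbytehq/another-repo"
for repo in $REPOSITORIES; do
  echo "would sync $repo"
done
```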
+datasources.section.source-google-ads.conversion_window_days.title=Conversion Window (Optional) +datasources.section.source-google-ads.credentials.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-ads.credentials.properties.client_id.title=Client ID +datasources.section.source-google-ads.credentials.properties.client_secret.title=Client Secret +datasources.section.source-google-ads.credentials.properties.developer_token.title=Developer Token +datasources.section.source-google-ads.credentials.properties.refresh_token.title=Refresh Token +datasources.section.source-google-ads.credentials.title=Google Credentials +datasources.section.source-google-ads.custom_queries.items.properties.query.title=Custom Query +datasources.section.source-google-ads.custom_queries.items.properties.table_name.title=Destination Table Name +datasources.section.source-google-ads.custom_queries.title=Custom GAQL Queries (Optional) +datasources.section.source-google-ads.customer_id.title=Customer ID(s) +datasources.section.source-google-ads.end_date.title=End Date (Optional) +datasources.section.source-google-ads.login_customer_id.title=Login Customer ID for Managed Accounts (Optional) +datasources.section.source-google-ads.start_date.title=Start Date +datasources.section.source-google-ads.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. For more information, see Google's documentation. +datasources.section.source-google-ads.credentials.description= +datasources.section.source-google-ads.credentials.properties.access_token.description=Access Token for making authenticated requests. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.client_id.description=The Client ID of your Google Ads developer application. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.client_secret.description=The Client Secret of your Google Ads developer application. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.developer_token.description=Developer token granted by Google to use their APIs. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.refresh_token.description=The token for obtaining a new access token. More instruction on how to find this value in our docs +datasources.section.source-google-ads.custom_queries.description= +datasources.section.source-google-ads.custom_queries.items.properties.query.description=A custom defined GAQL query for building the report. Should not contain segments.date expression because it is used by incremental streams. See Google's query builder for more information. +datasources.section.source-google-ads.custom_queries.items.properties.table_name.description=The table name in your destination database for the chosen query. +datasources.section.source-google-ads.customer_id.description=Comma-separated list of (client) customer IDs. Each customer ID must be specified as a 10-digit number without dashes. More instruction on how to find this value in our docs. 
Metrics streams like AdGroupAdReport cannot be requested for a manager account. +datasources.section.source-google-ads.end_date.description=UTC date and time in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-google-ads.login_customer_id.description=If your access to the customer account is through a manager account, this field is required and must be set to the customer ID of the manager account (10-digit number without dashes). More information about this field can be found here +datasources.section.source-google-ads.start_date.description=UTC date and time in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.title=Authenticate via Google (OAuth) +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-analytics-data-api.credentials.title=Credentials +datasources.section.source-google-analytics-data-api.custom_reports.title=Custom Reports (Optional) +datasources.section.source-google-analytics-data-api.date_ranges_start_date.title=Date Range Start Date +datasources.section.source-google-analytics-data-api.property_id.title=Property ID +datasources.section.source-google-analytics-data-api.window_in_days.title=Data request time increment in days (Optional) +datasources.section.source-google-analytics-data-api.credentials.description=Credentials for the service +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization +datasources.section.source-google-analytics-data-api.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field. +datasources.section.source-google-analytics-data-api.date_ranges_start_date.description=The start date. One of the values Ndaysago, yesterday, today or in the format YYYY-MM-DD +datasources.section.source-google-analytics-data-api.property_id.description=A Google Analytics GA4 property identifier whose events are tracked. 
Specified in the URL path and not the body +datasources.section.source-google-analytics-data-api.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-analytics-v4.credentials.oneOf.0.title=Authenticate via Google (Oauth) +datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key +datasources.section.source-google-analytics-v4.credentials.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-analytics-v4.credentials.title=Credentials +datasources.section.source-google-analytics-v4.custom_reports.title=Custom Reports (Optional) +datasources.section.source-google-analytics-v4.start_date.title=Replication Start Date +datasources.section.source-google-analytics-v4.view_id.title=View ID +datasources.section.source-google-analytics-v4.window_in_days.title=Data request time increment in days (Optional) +datasources.section.source-google-analytics-v4.credentials.description=Credentials for the service +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization +datasources.section.source-google-analytics-v4.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field. +datasources.section.source-google-analytics-v4.start_date.description=The date in the format YYYY-MM-DD. Any data before this date will not be replicated. +datasources.section.source-google-analytics-v4.view_id.description=The ID for the Google Analytics View you want to fetch data from. This can be found from the Google Analytics Account Explorer. +datasources.section.source-google-analytics-v4.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. 
More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364. +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-directory.credentials.oneOf.0.title=Sign in via Google (OAuth) +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.title=Credentials JSON +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.source-google-directory.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-google-directory.credentials.oneOf.1.title=Service Account Key +datasources.section.source-google-directory.credentials.title=Google Credentials +datasources.section.source-google-directory.credentials.description=Google APIs use the OAuth 2.0 protocol for authentication and authorization. The Source supports the Web server application and Service account scenarios. +datasources.section.source-google-directory.credentials.oneOf.0.description=For this scenario, the user only needs to give permission to read Google Directory data. +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.description=The Client ID of the developer application. +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the developer application. +datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.description=Authentication Scenario +datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.description=The Token for obtaining a new access token. +datasources.section.source-google-directory.credentials.oneOf.1.description=For this scenario, the user should obtain the service account's credentials from the Google API Console and provide the delegated email. +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key. +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.description=Authentication Scenario +datasources.section.source-google-directory.credentials.oneOf.1.properties.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs. 
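The `window_in_days` descriptions above explain that the requested date range is sliced into fixed-size request windows. A rough sketch of that slicing with hypothetical dates and a 3-day window, assuming GNU date (the connector itself presumably clamps the last window to the end date):

```
# Rough sketch of the N-day request windows described for window_in_days.
# Dates are hypothetical; assumes GNU date.
START="2021-01-01"
END="2021-01-10"
WINDOW_IN_DAYS=3
cursor="$START"
while [[ ! "$cursor" > "$END" ]]; do
  window_end=$(date -u -d "$cursor + $((WINDOW_IN_DAYS - 1)) days" +%F)
  echo "request window: $cursor .. $window_end"
  cursor=$(date -u -d "$cursor + $WINDOW_IN_DAYS days" +%F)
done
```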
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-search-console.authorization.oneOf.0.title=OAuth +datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.title=Admin Email +datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.title=Service Account JSON Key +datasources.section.source-google-search-console.authorization.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-search-console.authorization.title=Authentication Type +datasources.section.source-google-search-console.end_date.title=End Date +datasources.section.source-google-search-console.site_urls.title=Website URL Property +datasources.section.source-google-search-console.start_date.title=Start Date +datasources.section.source-google-search-console.authorization.description= +datasources.section.source-google-search-console.authorization.description= +datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.description=Access token for making authenticated requests. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.description=The client ID of your Google Search Console developer application. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.description=The client secret of your Google Search Console developer application. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.description=The email of the user which has permissions to access the Google Workspace Admin APIs. +datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.description=The JSON key of the service account to use for authorization. Read more here. +datasources.section.source-google-search-console.end_date.description=UTC date in the format 2017-01-25. Any data after this date will not be replicated. Must be greater or equal to the start date field. +datasources.section.source-google-search-console.site_urls.description=The URLs of the website property attached to your GSC account. Read more here. +datasources.section.source-google-search-console.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-google-workspace-admin-reports.credentials_json.title=Credentials JSON +datasources.section.source-google-workspace-admin-reports.email.title=Email +datasources.section.source-google-workspace-admin-reports.lookback.title=Lookback Window in Days +datasources.section.source-google-workspace-admin-reports.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key. 
+datasources.section.source-google-workspace-admin-reports.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs. +datasources.section.source-google-workspace-admin-reports.lookback.description=Sets the range of time shown in the report. The maximum value allowed by the Google API is 180 days. +datasources.section.source-greenhouse.api_key.title=API Key +datasources.section.source-greenhouse.api_key.description=Greenhouse API Key. See the docs for more information on how to generate this key. +datasources.section.source-harvest.account_id.title=Account ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-harvest.credentials.oneOf.0.title=Authenticate via Harvest (OAuth) +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.title=Personal Access Token +datasources.section.source-harvest.credentials.oneOf.1.title=Authenticate with Personal Access Token +datasources.section.source-harvest.credentials.title=Authentication mechanism +datasources.section.source-harvest.replication_start_date.title=Start Date +datasources.section.source-harvest.account_id.description=Harvest account ID. Required for all Harvest requests in combination with a Personal Access Token +datasources.section.source-harvest.credentials.description=Choose how to authenticate to Harvest. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.description=The Client ID of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.description=Log into Harvest and then create a new personal access token. +datasources.section.source-harvest.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-hellobaton.api_key.description=Authentication key required to access the API endpoints +datasources.section.source-hellobaton.company.description=Company name that generates your base API URL +datasources.section.source-hubplanner.api_key.description=Hubplanner API key. See https://github.com/hubplanner/API#authentication for more details. +datasources.section.source-instagram.access_token.title=Access Token +datasources.section.source-instagram.start_date.title=Start Date +datasources.section.source-instagram.access_token.description=The value of the access token generated. See the docs for more information +datasources.section.source-instagram.start_date.description=The date from which you'd like to replicate data for User Insights, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-intercom.access_token.title=Access token +datasources.section.source-intercom.start_date.title=Start date +datasources.section.source-intercom.access_token.description=Access token for making authenticated requests. See the Intercom docs for more information. 
+datasources.section.source-intercom.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-iterable.api_key.title=API Key +datasources.section.source-iterable.start_date.title=Start Date +datasources.section.source-iterable.api_key.description=Iterable API Key. See the docs for more information on how to obtain this key. +datasources.section.source-iterable.start_date.description=The date from which you'd like to replicate data for Iterable, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-jdbc.jdbc_url.title=JDBC URL +datasources.section.source-jdbc.password.title=Password +datasources.section.source-jdbc.username.title=Username +datasources.section.source-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.source-jdbc.password.description=The password associated with this username. +datasources.section.source-jdbc.username.description=The username which is used to access the database. +datasources.section.source-jira.additional_fields.title=Additional Fields +datasources.section.source-jira.api_token.title=API Token +datasources.section.source-jira.domain.title=Domain +datasources.section.source-jira.email.title=Email +datasources.section.source-jira.enable_experimental_streams.title=Enable Experimental Streams +datasources.section.source-jira.expand_issue_changelog.title=Expand Issue Changelog +datasources.section.source-jira.projects.title=Projects +datasources.section.source-jira.render_fields.title=Render Issue Fields +datasources.section.source-jira.start_date.title=Start Date +datasources.section.source-jira.additional_fields.description=List of additional fields to include in replicating issues. +datasources.section.source-jira.api_token.description=Jira API Token. See the docs for more information on how to generate this key. +datasources.section.source-jira.domain.description=The Domain for your Jira account, e.g. airbyteio.atlassian.net +datasources.section.source-jira.email.description=The user email for your Jira account. +datasources.section.source-jira.enable_experimental_streams.description=Allow the use of experimental streams which rely on undocumented Jira API endpoints. See https://docs.airbyte.io/integrations/sources/jira#experimental-tables for more info. +datasources.section.source-jira.expand_issue_changelog.description=Expand the changelog when replicating issues. +datasources.section.source-jira.projects.description=List of Jira project keys to replicate data for. +datasources.section.source-jira.render_fields.description=Render issue fields in HTML format in addition to Jira JSON-like format. +datasources.section.source-jira.start_date.description=The date from which you'd like to replicate data for Jira in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only in the following incremental streams: issues. 
+datasources.section.source-kafka.auto_commit_interval_ms.title=Auto Commit Interval, ms +datasources.section.source-kafka.auto_offset_reset.title=Auto Offset Reset +datasources.section.source-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.source-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.source-kafka.client_id.title=Client ID +datasources.section.source-kafka.enable_auto_commit.title=Enable Auto Commit +datasources.section.source-kafka.group_id.title=Group ID +datasources.section.source-kafka.max_poll_records.title=Max Poll Records +datasources.section.source-kafka.polling_time.title=Polling Time +datasources.section.source-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.source-kafka.protocol.title=Protocol +datasources.section.source-kafka.receive_buffer_bytes.title=Receive Buffer, bytes +datasources.section.source-kafka.repeated_calls.title=Repeated Calls +datasources.section.source-kafka.request_timeout_ms.title=Request Timeout, ms +datasources.section.source-kafka.retry_backoff_ms.title=Retry Backoff, ms +datasources.section.source-kafka.subscription.oneOf.0.properties.topic_partitions.title=List of topic:partition Pairs +datasources.section.source-kafka.subscription.oneOf.0.title=Manually assign a list of partitions +datasources.section.source-kafka.subscription.oneOf.1.properties.topic_pattern.title=Topic Pattern +datasources.section.source-kafka.subscription.oneOf.1.title=Subscribe to all topics matching specified pattern +datasources.section.source-kafka.subscription.title=Subscription Method +datasources.section.source-kafka.test_topic.title=Test Topic +datasources.section.source-kafka.auto_commit_interval_ms.description=The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true. +datasources.section.source-kafka.auto_offset_reset.description=What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer. +datasources.section.source-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
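The `bootstrap_servers` description above expects the form host1:port1,host2:port2,.... A minimal sketch of splitting such a value into host/port pairs; the broker addresses are hypothetical:

```
# Sketch: split a bootstrap_servers value of the form host1:port1,host2:port2.
# Broker addresses are hypothetical.
BOOTSTRAP_SERVERS="broker1:9092,broker2:9092"
IFS=',' read -ra servers <<< "$BOOTSTRAP_SERVERS"
for s in "${servers[@]}"; do
  echo "host=${s%%:*} port=${s##*:}"
done
```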
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background. +datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups. +datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note, that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and returns them incrementally from each poll. +datasources.section.source-kafka.polling_time.description=Amount of time Kafka connector should try to poll for messages. +datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received. +datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios. 
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching a specified pattern to get dynamically assigned partitions. +datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one). +datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read. +datasources.section.source-kafka.test_topic.description=The topic used to test whether Airbyte can consume messages. +datasources.section.source-klaviyo.api_key.title=API Key +datasources.section.source-klaviyo.start_date.title=Start Date +datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key. +datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-kustomer-singer.api_token.title=API Token +datasources.section.source-kustomer-singer.start_date.title=Start Date +datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this +datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data +datasources.section.source-kyriba.domain.title=Domain +datasources.section.source-kyriba.end_date.title=End Date +datasources.section.source-kyriba.password.title=Password +datasources.section.source-kyriba.start_date.title=Start Date +datasources.section.source-kyriba.username.title=Username +datasources.section.source-kyriba.domain.description=Kyriba domain +datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run up to the current date. +datasources.section.source-kyriba.password.description=Password to be used in basic auth +datasources.section.source-kyriba.start_date.description=The date the sync should start from. +datasources.section.source-kyriba.username.description=Username to be used in basic auth +datasources.section.source-lemlist.api_key.title=API key +datasources.section.source-lemlist.api_key.description=Lemlist API key. +datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional) +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token +datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token +datasources.section.source-linkedin-ads.credentials.title=Authentication * +datasources.section.source-linkedin-ads.start_date.title=Start date +datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs, separated by a space, to pull the data from. Leave empty if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info. 
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours. +datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token +datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token +datasources.section.source-linkedin-pages.credentials.title=Authentication * +datasources.section.source-linkedin-pages.org_id.title=Organization ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID +datasources.section.source-linnworks.application_id.title=Application ID +datasources.section.source-linnworks.application_secret.title=Application Secret +datasources.section.source-linnworks.start_date.title=Start Date +datasources.section.source-linnworks.token.title=API Token +datasources.section.source-linnworks.application_id.description=Linnworks Application ID +datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret +datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-looker.client_id.title=Client ID +datasources.section.source-looker.client_secret.title=Client Secret +datasources.section.source-looker.domain.title=Domain +datasources.section.source-looker.run_look_ids.title=Look IDs to Run +datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key. +datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key. 
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com,looker.[clientname].com,IP address +datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional) +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key +datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key +datasources.section.source-mailchimp.credentials.title=Authentication * +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key. +datasources.section.source-mailgun.domain_region.title=Domain Region Code +datasources.section.source-mailgun.private_key.title=Private API Key +datasources.section.source-mailgun.start_date.title=Replication Start Date +datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'. +datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data. +datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago. +datasources.section.source-marketo.client_id.title=Client ID +datasources.section.source-marketo.client_secret.title=Client Secret +datasources.section.source-marketo.domain_url.title=Domain URL +datasources.section.source-marketo.start_date.title=Start Date +datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this. +datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID +datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0) +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID +datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft +datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism +datasources.section.source-microsoft-teams.period.title=Period +datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL +datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180. 
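The tenant ID descriptions above say to copy the "Get link to team" URL and grab the tenant ID from it. A sketch of pulling a tenantId query parameter out of such a link, assuming the copied link carries one; the URL below is a placeholder, not a real team link:

```
# Sketch: extract a tenantId query parameter from a copied "Get link to team"
# URL. The link below is a placeholder; real links carry a GUID value.
TEAM_LINK="https://teams.microsoft.com/l/team/CHANNEL_ID/conversations?groupId=GROUP_ID&tenantId=TENANT_ID"
echo "$TEAM_LINK" | grep -o 'tenantId=[^&]*' | cut -d= -f2
```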
+datasources.section.source-mixpanel.api_secret.title=Project Secret +datasources.section.source-mixpanel.attribution_window.title=Attribution Window +datasources.section.source-mixpanel.date_window_size.title=Date slicing window +datasources.section.source-mixpanel.end_date.title=End Date +datasources.section.source-mixpanel.project_timezone.title=Project Timezone +datasources.section.source-mixpanel.region.title=Region +datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default +datasources.section.source-mixpanel.start_date.title=Start Date +datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this. +datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days. +datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days used to slice through the data. You can reduce it if the amount of data in each window is too big for your environment. +datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave it empty to always sync to the most recent date. +datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console. +datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU. +datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise, new properties will be ignored. +datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional) +datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-monday.credentials.oneOf.1.title=API Token +datasources.section.source-monday.credentials.title=Authorization Method +datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. 
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests. +datasources.section.source-mongodb.auth_source.title=Authentication source +datasources.section.source-mongodb.database.title=Database name +datasources.section.source-mongodb.host.title=Host +datasources.section.source-mongodb.password.title=Password +datasources.section.source-mongodb.port.title=Port +datasources.section.source-mongodb.replica_set.title=Replica Set +datasources.section.source-mongodb.ssl.title=TLS connection +datasources.section.source-mongodb.user.title=User +datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info. +datasources.section.source-mongodb.database.description=Database to be replicated. +datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated. +datasources.section.source-mongodb.password.description=Password +datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated. +datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info. +datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB. +datasources.section.source-mongodb.user.description=User +datasources.section.source-mongodb-v2.auth_source.title=Authentication Source +datasources.section.source-mongodb-v2.database.title=Database Name +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses +datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type +datasources.section.source-mongodb-v2.password.title=Password +datasources.section.source-mongodb-v2.user.title=User +datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored. +datasources.section.source-mongodb-v2.database.description=The database you want to replicate. +datasources.section.source-mongodb-v2.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database. 
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by comma. +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to. +datasources.section.source-mongodb-v2.password.description=The password associated with this username. +datasources.section.source-mongodb-v2.user.description=The username which is used to access the database. +datasources.section.source-mssql.database.title=Database +datasources.section.source-mssql.host.title=Host +datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mssql.password.title=Password +datasources.section.source-mssql.port.title=Port +datasources.section.source-mssql.replication_method.oneOf.0.title=Standard +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level +datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mssql.replication_method.title=Replication Method +datasources.section.source-mssql.schemas.title=Schemas +datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.source-mssql.ssl_method.title=SSL Method +datasources.section.source-mssql.username.title=Username +datasources.section.source-mssql.database.description=The name of the database. +datasources.section.source-mssql.host.description=The hostname of the database. +datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mssql.password.description=The password associated with the username. +datasources.section.source-mssql.port.description=The port of the database. +datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC. +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database. +datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database. +datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.source-mssql.username.description=The username which is used to access the database. 
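The jdbc_url_params fields above for source-mssql (the same wording recurs for the other JDBC-based sources in this file) expect a single string of 'key=value' pairs joined with '&'. A minimal sketch of such a value, with made-up parameter names rather than anything prescribed by the connector:

```
# Hypothetical jdbc_url_params value in the 'key1=value1&key2=value2' form described above;
# the parameter names are illustrative only.
JDBC_URL_PARAMS="loginTimeout=30&applicationName=airbyte"
echo "${JDBC_URL_PARAMS}"
```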
+datasources.section.source-my-hours.email.title=Email +datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size +datasources.section.source-my-hours.password.title=Password +datasources.section.source-my-hours.start_date.title=Start Date +datasources.section.source-my-hours.email.description=Your My Hours username +datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days +datasources.section.source-my-hours.password.description=The password associated to the username +datasources.section.source-my-hours.start_date.description=Start date for collecting time logs +datasources.section.source-mysql.database.title=Database +datasources.section.source-mysql.host.title=Host +datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mysql.password.title=Password +datasources.section.source-mysql.port.title=Port +datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD +datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mysql.replication_method.title=Replication Method +datasources.section.source-mysql.ssl.title=SSL Connection +datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred +datasources.section.source-mysql.ssl_mode.oneOf.1.title=required +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity +datasources.section.source-mysql.ssl_mode.title=SSL modes +datasources.section.source-mysql.username.title=Username +datasources.section.source-mysql.database.description=The database name. +datasources.section.source-mysql.host.description=The host name of the database. +datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mysql.password.description=The password associated with the username. +datasources.section.source-mysql.port.description=The port to connect to. +datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database. +datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL. +datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
  • preferred - Automatically attempt SSL connection. If the MySQL server does not support SSL, continue with a regular connection.
  • required - Always connect with SSL. If the MySQL server doesn’t support SSL, the connection will not be established. Certificate Authority (CA) and Hostname are not verified.
  • verify-ca - Always connect with SSL. Verifies CA, but allows connection even if Hostname does not match.
  • Verify Identity - Always connect with SSL. Verify both CA and Hostname.
  • Read more in the docs. +datasources.section.source-mysql.ssl_mode.oneOf.0.description=Preferred SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.1.description=Require SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.description=Verify CA SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.description=Password for key storage. This field is optional. If you do not add it, the password will be generated automatically. +datasources.section.source-mysql.ssl_mode.oneOf.3.description=Verify-full SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.description=Password for key storage. This field is optional. If you do not add it, the password will be generated automatically. +datasources.section.source-mysql.username.description=The username which is used to access the database. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-notion.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-notion.credentials.oneOf.1.properties.token.title=Access Token +datasources.section.source-notion.credentials.oneOf.1.title=Access Token +datasources.section.source-notion.credentials.title=Authenticate using +datasources.section.source-notion.start_date.title=Start Date +datasources.section.source-notion.credentials.description=Pick an authentication method. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.description=The Access Token is a token you received by completing the OAuth web flow of Notion. +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.description=The Client ID of your Notion integration. +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Notion integration. +datasources.section.source-notion.credentials.oneOf.1.properties.token.description=Notion API access token, see the docs for more information on how to obtain this token. +datasources.section.source-notion.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
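The four SSL modes listed above for source-mysql (preferred, required, verify-ca, Verify Identity) correspond to the same notions the stock mysql CLI client exposes. Shown here purely as an illustration of what "Verify CA" means in practice; the host, user, and certificate path are hypothetical, and these are not connector flags:

```
# Illustration only: the mysql CLI equivalent of the "Verify CA" mode described above.
mysql --host=db.example.com --user=airbyte --password \
      --ssl-mode=VERIFY_CA --ssl-ca=/path/to/ca.pem
```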
+datasources.section.source-okta.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-okta.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-okta.credentials.oneOf.1.title=API Token +datasources.section.source-okta.credentials.title=Authorization Method * +datasources.section.source-okta.domain.title=Okta domain +datasources.section.source-okta.start_date.title=Start Date +datasources.section.source-okta.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain new Access Token, when it's expired. +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.description=An Okta token. See the docs for instructions on how to generate it. +datasources.section.source-okta.domain.description=The Okta domain. See the docs for instructions on how to find it. +datasources.section.source-okta.start_date.description=UTC date and time in the format YYYY-MM-DDTHH:MM:SSZ. Any data before this date will not be replicated. +datasources.section.source-onesignal.outcome_names.title=Outcome Names +datasources.section.source-onesignal.start_date.title=Start Date +datasources.section.source-onesignal.user_auth_key.title=User Auth Key +datasources.section.source-onesignal.outcome_names.description=Comma-separated list of names and the value (sum/count) for the returned outcome data. See the docs for more details +datasources.section.source-onesignal.start_date.description=The date from which you'd like to replicate data for OneSignal API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-onesignal.user_auth_key.description=OneSignal User Auth Key, see the docs for more information on how to obtain this key. +datasources.section.source-openweather.appid.title=App ID +datasources.section.source-openweather.lang.title=Language +datasources.section.source-openweather.lat.title=Latitude +datasources.section.source-openweather.lon.title=Longitude +datasources.section.source-openweather.units.title=Units +datasources.section.source-openweather.appid.description=Your OpenWeather API Key. See here. The key is case sensitive. +datasources.section.source-openweather.lang.description=You can use lang parameter to get the output in your language. The contents of the description field will be translated. See here for the list of supported languages. +datasources.section.source-openweather.lat.description=Latitude for which you want to get weather condition from. (min -90, max 90) +datasources.section.source-openweather.lon.description=Longitude for which you want to get weather condition from. (min -180, max 180) +datasources.section.source-openweather.units.description=Units of measurement. standard, metric and imperial units are available. If you do not use the units parameter, standard units will be applied by default. 
+datasources.section.source-oracle.connection_data.oneOf.0.properties.service_name.title=Service name +datasources.section.source-oracle.connection_data.oneOf.0.title=Service name +datasources.section.source-oracle.connection_data.oneOf.1.properties.sid.title=System ID (SID) +datasources.section.source-oracle.connection_data.oneOf.1.title=System ID (SID) +datasources.section.source-oracle.connection_data.title=Connect by +datasources.section.source-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.source-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM File +datasources.section.source-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.source-oracle.encryption.title=Encryption +datasources.section.source-oracle.host.title=Host +datasources.section.source-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.source-oracle.password.title=Password +datasources.section.source-oracle.port.title=Port +datasources.section.source-oracle.schemas.title=Schemas +datasources.section.source-oracle.username.title=User +datasources.section.source-oracle.connection_data.description=The connection data that will be used for the DB connection. +datasources.section.source-oracle.connection_data.oneOf.0.description=Use service name +datasources.section.source-oracle.connection_data.oneOf.1.description=Use SID (Oracle System Identifier) +datasources.section.source-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.source-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines what encryption algorithm is used. +datasources.section.source-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.source-oracle.host.description=Hostname of the database. +datasources.section.source-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-oracle.password.description=The password associated with the username. +datasources.section.source-oracle.port.description=Port of the database. +datasources.section.source-oracle.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-oracle.username.description=The username which is used to access the database. 
+datasources.section.source-orb.api_key.title=Orb API Key +datasources.section.source-orb.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-orb.numeric_event_properties_keys.title=Event properties keys (numeric values) +datasources.section.source-orb.start_date.title=Start Date +datasources.section.source-orb.string_event_properties_keys.title=Event properties keys (string values) +datasources.section.source-orb.api_key.description=Orb API Key, issued from the Orb admin console. +datasources.section.source-orb.lookback_window_days.description=When set to N, the connector will always refresh resources created within the past N days. By default, updated objects that are not newly created are not incrementally synced. +datasources.section.source-orb.numeric_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-orb.start_date.description=UTC date and time in the format 2022-03-01T00:00:00Z. Any data with created_at before this date will not be synced. +datasources.section.source-orb.string_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-outreach.client_id.title=Client ID +datasources.section.source-outreach.client_secret.title=Client Secret +datasources.section.source-outreach.redirect_uri.title=Redirect URI +datasources.section.source-outreach.refresh_token.title=Refresh Token +datasources.section.source-outreach.start_date.title=Start Date +datasources.section.source-outreach.client_id.description=The Client ID of your Outreach developer application. +datasources.section.source-outreach.client_secret.description=The Client Secret of your Outreach developer application. +datasources.section.source-outreach.redirect_uri.description=A Redirect URI is the location where the authorization server sends the user once the app has been successfully authorized and granted an authorization code or access token. +datasources.section.source-outreach.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-outreach.start_date.description=The date from which you'd like to replicate data for Outreach API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-pardot.client_id.description=The Consumer Key that can be found when viewing your app in Salesforce +datasources.section.source-pardot.client_secret.description=The Consumer Secret that can be found when viewing your app in Salesforce +datasources.section.source-pardot.is_sandbox.description=Whether or not the app is in a Salesforce sandbox. If you do not know what this is, assume it is false. +datasources.section.source-pardot.pardot_business_unit_id.description=Pardot Business ID, can be found at Setup > Pardot > Pardot Account Setup +datasources.section.source-pardot.refresh_token.description=Salesforce Refresh Token used for Airbyte to access your Salesforce account. If you don't know what this is, follow this guide to retrieve it. +datasources.section.source-pardot.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
Leave blank to skip this filter +datasources.section.source-paypal-transaction.client_id.title=Client ID +datasources.section.source-paypal-transaction.client_secret.title=Client secret +datasources.section.source-paypal-transaction.is_sandbox.title=Sandbox +datasources.section.source-paypal-transaction.refresh_token.title=Refresh token (Optional) +datasources.section.source-paypal-transaction.start_date.title=Start Date +datasources.section.source-paypal-transaction.client_id.description=The Client ID of your Paypal developer application. +datasources.section.source-paypal-transaction.client_secret.description=The Client Secret of your Paypal developer application. +datasources.section.source-paypal-transaction.is_sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-paypal-transaction.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-paypal-transaction.start_date.description=Start Date for data extraction in ISO format. Date must be in range from 3 years till 12 hrs before present time. +datasources.section.source-paystack.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-paystack.secret_key.title=Secret Key +datasources.section.source-paystack.start_date.title=Start Date +datasources.section.source-paystack.lookback_window_days.description=When set, the connector will always reload data from the past N days, where N is the value set here. This is useful if your data is updated after creation. +datasources.section.source-paystack.secret_key.description=The Paystack API key (usually starts with 'sk_live_'; find yours here). +datasources.section.source-paystack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-persistiq.api_key.description=PersistIq API Key. See the docs for more information on where to find that key. +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pinterest.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-pinterest.credentials.oneOf.1.title=Access Token +datasources.section.source-pinterest.credentials.title=Authorization Method +datasources.section.source-pinterest.start_date.title=Start Date +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain new Access Token, when it's expired. +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests. +datasources.section.source-pinterest.start_date.description=A date in the format YYYY-MM-DD. If you have not set a date, it would be defaulted to 2020-07-28. 
+datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pipedrive.authorization.oneOf.0.title=Sign in via Pipedrive (OAuth) +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.title=API Token +datasources.section.source-pipedrive.authorization.oneOf.1.title=API Key Authentication +datasources.section.source-pipedrive.authorization.title=Authentication Type +datasources.section.source-pipedrive.replication_start_date.title=Start Date +datasources.section.source-pipedrive.authorization.description=Choose one of the possible authorization method +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.description=The Client ID of your Pipedrive developer application. +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.description=The Client Secret of your Pipedrive developer application +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.description=The Pipedrive API Token. +datasources.section.source-pipedrive.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. When specified and not None, then stream will behave as incremental +datasources.section.source-pivotal-tracker.api_token.description=Pivotal Tracker API token +datasources.section.source-plaid.access_token.title=Access Token +datasources.section.source-plaid.api_key.title=API Key +datasources.section.source-plaid.client_id.title=Client ID +datasources.section.source-plaid.plaid_env.title=Plaid Environment +datasources.section.source-plaid.start_date.title=Start Date +datasources.section.source-plaid.access_token.description=The end-user's Link access token. +datasources.section.source-plaid.api_key.description=The Plaid API key to use to hit the API. +datasources.section.source-plaid.client_id.description=The Plaid client id +datasources.section.source-plaid.plaid_env.description=The Plaid environment +datasources.section.source-plaid.start_date.description=The date from which you'd like to replicate data for Plaid in the format YYYY-MM-DD. All data generated after this date will be replicated. +datasources.section.source-pokeapi.pokemon_name.title=Pokemon Name +datasources.section.source-pokeapi.pokemon_name.description=Pokemon requested from the API. 
+datasources.section.source-postgres.database.title=Database Name +datasources.section.source-postgres.host.title=Host +datasources.section.source-postgres.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-postgres.password.title=Password +datasources.section.source-postgres.port.title=Port +datasources.section.source-postgres.replication_method.oneOf.0.title=Standard +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.title=Initial Waiting Time in Seconds (Advanced) +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.title=Plugin +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.title=Publication +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.title=Replication Slot +datasources.section.source-postgres.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-postgres.replication_method.title=Replication Method +datasources.section.source-postgres.schemas.title=Schemas +datasources.section.source-postgres.ssl.title=Connect using SSL +datasources.section.source-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.source-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.source-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.source-postgres.ssl_mode.oneOf.3.title=require +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA Certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.source-postgres.ssl_mode.title=SSL Modes +datasources.section.source-postgres.username.title=Username +datasources.section.source-postgres.database.description=Name of the database. +datasources.section.source-postgres.host.description=Hostname of the database. +datasources.section.source-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-postgres.password.description=Password associated with the username. +datasources.section.source-postgres.port.description=Port of the database. +datasources.section.source-postgres.replication_method.description=Replication method for extracting data from the database. +datasources.section.source-postgres.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. 
+datasources.section.source-postgres.replication_method.oneOf.1.description=Logical replication uses the Postgres write-ahead log (WAL) to detect inserts, updates, and deletes. This needs to be configured on the source database itself. Only available on Postgres 10 and above. Read the docs. +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.description=The amount of time the connector will wait when it launches to determine if there is new data to sync or not. Defaults to 300 seconds. Valid range: 120 seconds to 1200 seconds. Read about initial waiting time. +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.description=A logical decoding plugin installed on the PostgreSQL server. The `pgoutput` plugin is used by default. If the replication table contains a lot of big jsonb values it is recommended to use the `wal2json` plugin. Read more about selecting replication plugins. +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.description=A Postgres publication used for consuming changes. Read about publications and replication identities. +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.description=A plugin logical replication slot. Read about replication slots. +datasources.section.source-postgres.schemas.description=The list of schemas (case sensitive) to sync from. Defaults to public. +datasources.section.source-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.source-postgres.ssl_mode.description=SSL connection modes. +datasources.section.source-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.source-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for key storage. If you do not add it, the password will be generated automatically. +datasources.section.source-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for key storage. If you do not add it, the password will be generated automatically. +datasources.section.source-postgres.username.description=Username to access the database. +datasources.section.source-posthog.api_key.title=API Key +datasources.section.source-posthog.base_url.title=Base URL +datasources.section.source-posthog.start_date.title=Start Date +datasources.section.source-posthog.api_key.description=API Key. 
See the docs for information on how to generate this key. +datasources.section.source-posthog.base_url.description=Base PostHog url. Defaults to PostHog Cloud (https://app.posthog.com). +datasources.section.source-posthog.start_date.description=The date from which you'd like to replicate the data. Any data before this date will not be replicated. +datasources.section.source-prestashop.access_key.description=Your PrestaShop access key. See the docs for info on how to obtain this. +datasources.section.source-prestashop.url.description=Shop URL without trailing slash (domain name or IP address) +datasources.section.source-qualaroo.key.title=API key +datasources.section.source-qualaroo.start_date.title=Start Date +datasources.section.source-qualaroo.survey_ids.title=Qualaroo survey IDs +datasources.section.source-qualaroo.token.title=API token +datasources.section.source-qualaroo.key.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-qualaroo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-qualaroo.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated. +datasources.section.source-qualaroo.token.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-quickbooks-singer.client_id.title=Client ID +datasources.section.source-quickbooks-singer.client_secret.title=Client Secret +datasources.section.source-quickbooks-singer.realm_id.title=Realm ID +datasources.section.source-quickbooks-singer.refresh_token.title=Refresh Token +datasources.section.source-quickbooks-singer.sandbox.title=Sandbox +datasources.section.source-quickbooks-singer.start_date.title=Start Date +datasources.section.source-quickbooks-singer.user_agent.title=User Agent +datasources.section.source-quickbooks-singer.client_id.description=Identifies which app is making the request. Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.client_secret.description= Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.realm_id.description=Labeled Company ID. The Make API Calls panel is populated with the realm id and the current access token. +datasources.section.source-quickbooks-singer.refresh_token.description=A token used when refreshing the access token. +datasources.section.source-quickbooks-singer.sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-quickbooks-singer.start_date.description=The default value to use if no bookmark exists for an endpoint (rfc3339 date string). E.g, 2021-03-20T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-quickbooks-singer.user_agent.description=Process and email for API logging purposes. Example: tap-quickbooks . +datasources.section.source-recharge.access_token.title=Access Token +datasources.section.source-recharge.start_date.title=Start Date +datasources.section.source-recharge.access_token.description=The value of the Access Token generated. See the docs for more information. 
+datasources.section.source-recharge.start_date.description=The date from which you'd like to replicate data for Recharge API, in the format YYYY-MM-DDT00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-recurly.api_key.title=API Key +datasources.section.source-recurly.api_key.description=Recurly API Key. See the docs for more information on how to generate this key. +datasources.section.source-recurly.begin_time.description=ISO8601 timestamp from which the replication from Recurly API will start from. +datasources.section.source-recurly.end_time.description=ISO8601 timestamp to which the replication from Recurly API will stop. Records after that date won't be imported. +datasources.section.source-redshift.database.title=Database +datasources.section.source-redshift.host.title=Host +datasources.section.source-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.source-redshift.password.title=Password +datasources.section.source-redshift.port.title=Port +datasources.section.source-redshift.schemas.title=Schemas +datasources.section.source-redshift.username.title=Username +datasources.section.source-redshift.database.description=Name of the database. +datasources.section.source-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com). +datasources.section.source-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-redshift.password.description=Password associated with the username. +datasources.section.source-redshift.port.description=Port of the database. +datasources.section.source-redshift.schemas.description=The list of schemas to sync from. Specify one or more explicitly or keep empty to process all schemas. Schema names are case sensitive. +datasources.section.source-redshift.username.description=Username to use to access the database. +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-retently.credentials.oneOf.0.title=Authenticate via Retently (OAuth) +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.title=API Token +datasources.section.source-retently.credentials.oneOf.1.title=Authenticate with API Token +datasources.section.source-retently.credentials.title=Authentication Mechanism +datasources.section.source-retently.credentials.description=Choose how to authenticate to Retently +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.description=The Client ID of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.description=Retently Refresh Token which can be used to fetch new Bearer Tokens when the current one expires. +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.description=Retently API Token. See the docs for more information on how to obtain this key. 
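The source-postgres CDC fields a few blocks above reference a replication slot, a publication, and the pgoutput logical decoding plugin. A minimal sketch of the server-side objects those fields point at, assuming a database named mydb and the purely illustrative names airbyte_slot and airbyte_publication:

```
# Hypothetical setup for the replication slot and publication referenced by the
# source-postgres logical replication (CDC) fields above.
psql -d mydb -c "SELECT pg_create_logical_replication_slot('airbyte_slot', 'pgoutput');"
psql -d mydb -c "CREATE PUBLICATION airbyte_publication FOR ALL TABLES;"
```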
+datasources.section.source-rki-covid.start_date.title=Start Date +datasources.section.source-rki-covid.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-s3.dataset.title=Output Stream Name +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.title=Additional Reader Options +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.title=Advanced Options +datasources.section.source-s3.format.oneOf.0.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.0.properties.delimiter.title=Delimiter +datasources.section.source-s3.format.oneOf.0.properties.double_quote.title=Double Quote +datasources.section.source-s3.format.oneOf.0.properties.encoding.title=Encoding +datasources.section.source-s3.format.oneOf.0.properties.escape_char.title=Escape Character +datasources.section.source-s3.format.oneOf.0.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.title=Infer Datatypes +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.0.properties.quote_char.title=Quote Character +datasources.section.source-s3.format.oneOf.0.title=CSV +datasources.section.source-s3.format.oneOf.1.properties.batch_size.title=Record batch size +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.title=Buffer Size +datasources.section.source-s3.format.oneOf.1.properties.columns.title=Selected Columns +datasources.section.source-s3.format.oneOf.1.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.1.title=Parquet +datasources.section.source-s3.format.oneOf.2.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.2.title=Avro +datasources.section.source-s3.format.oneOf.3.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.3.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.title=UnexpectedFieldBehaviorEnum +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.title=Unexpected field behavior +datasources.section.source-s3.format.oneOf.3.title=Jsonl +datasources.section.source-s3.format.title=File Format +datasources.section.source-s3.path_pattern.title=Pattern of files to replicate +datasources.section.source-s3.provider.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-s3.provider.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-s3.provider.properties.bucket.title=Bucket +datasources.section.source-s3.provider.properties.endpoint.title=Endpoint +datasources.section.source-s3.provider.properties.path_prefix.title=Path Prefix +datasources.section.source-s3.provider.properties.use_ssl.title=Use TLS +datasources.section.source-s3.provider.properties.verify_ssl_cert.title=Verify TLS Certificates +datasources.section.source-s3.provider.title=S3: Amazon Web Services +datasources.section.source-s3.schema.title=Manually enforced data schema (Optional) +datasources.section.source-s3.dataset.description=The name of the stream you would like this source to output. Can contain letters, numbers, or underscores. 
+datasources.section.source-s3.format.description=The format of the files you'd like to replicate +datasources.section.source-s3.format.oneOf.0.description=This connector utilises PyArrow (Apache Arrow) for CSV parsing. +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.description=Optionally add a valid JSON string here to provide additional options to the csv reader. Mappings must correspond to options detailed here. 'column_types' is used internally to handle schema so overriding that would likely cause problems. +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.description=Optionally add a valid JSON string here to provide additional Pyarrow ReadOptions. Specify 'column_names' here if your CSV doesn't have a header, or if you want to use custom column names. 'block_size' and 'encoding' are already used above; specifying them again here will override the values above. +datasources.section.source-s3.format.oneOf.0.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.0.properties.delimiter.description=The character delimiting individual cells in the CSV data. This may only be a 1-character string. For tab-delimited data enter '\t'. +datasources.section.source-s3.format.oneOf.0.properties.double_quote.description=Whether two quotes in a quoted CSV value denote a single quote in the data. +datasources.section.source-s3.format.oneOf.0.properties.encoding.description=The character encoding of the CSV data. Leave blank to default to UTF8. See list of python encodings for allowable options. +datasources.section.source-s3.format.oneOf.0.properties.escape_char.description=The character used for escaping special characters. To disallow escaping, leave this field blank. +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.description=Configures whether a schema for the source should be inferred from the current data or not. If set to false and a custom schema is set, then the manually enforced schema is used. If a schema is not manually set, and this is set to false, then all fields will be read as strings. +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.description=Whether newline characters are allowed in CSV values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.0.properties.quote_char.description=The character used for quoting CSV values. To disallow quoting, leave this field blank. +datasources.section.source-s3.format.oneOf.1.description=This connector utilises PyArrow (Apache Arrow) for Parquet parsing. +datasources.section.source-s3.format.oneOf.1.properties.batch_size.description=Maximum number of records per batch read from the input files. Batches may be smaller if there aren’t enough rows in the file. This option can help avoid out-of-memory errors if your data is particularly wide. +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.description=Perform read buffering when deserializing individual column chunks. By default every group column will be loaded fully to memory. This option can help avoid out-of-memory errors if your data is particularly wide. 
+datasources.section.source-s3.format.oneOf.1.properties.columns.description=If you only want to sync a subset of the columns from the file(s), add the columns you want here as a comma-delimited list. Leave it empty to sync all columns. +datasources.section.source-s3.format.oneOf.2.description=This connector utilises fastavro for Avro parsing. +datasources.section.source-s3.format.oneOf.3.description=This connector uses PyArrow for JSON Lines (jsonl) file parsing. +datasources.section.source-s3.format.oneOf.3.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.description=Whether newline characters are allowed in JSON values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.description=An enumeration. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.description=How JSON fields outside of explicit_schema (if given) are treated. Check PyArrow documentation for details +datasources.section.source-s3.path_pattern.description=A regular expression which tells the connector which files to replicate. All files which match this pattern will be replicated. Use | to separate multiple patterns. See this page to understand pattern syntax (GLOBSTAR and SPLIT flags are enabled). Use pattern ** to pick up all files. +datasources.section.source-s3.provider.description=Use this to load files from S3 or S3-compatible services +datasources.section.source-s3.provider.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.bucket.description=Name of the S3 bucket where the file(s) exist. +datasources.section.source-s3.provider.properties.endpoint.description=Endpoint to an S3 compatible service. Leave empty to use AWS. +datasources.section.source-s3.provider.properties.path_prefix.description=By providing a path-like prefix (e.g. myFolder/thisTable/) under which all the relevant files sit, we can optimize finding these in S3. This is optional but recommended if your bucket contains many folders/files which you don't need to replicate. +datasources.section.source-s3.provider.properties.use_ssl.description=Whether the remote server is using a secure SSL/TLS connection. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.provider.properties.verify_ssl_cert.description=Set this to false to allow self signed certificates. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.schema.description=Optionally provide a schema to enforce, as a valid JSON string. Ensure this is a mapping of { "column" : "type" }, where types are valid JSON Schema datatypes. Leave as {} to auto-infer the schema. 
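The source-s3 path_pattern description above says patterns use glob syntax with the GLOBSTAR and SPLIT flags enabled, with '|' separating alternatives and '**' matching every file. A few illustrative values; the folder and file names are made up:

```
# Illustrative path_pattern values for the source-s3 field described above.
PATH_PATTERN='**'                          # every file in the bucket
PATH_PATTERN='myFolder/**/*.csv'           # all CSV files anywhere under myFolder/
PATH_PATTERN='reports/*.csv|logs/*.jsonl'  # two alternative patterns joined with |
echo "${PATH_PATTERN}"
```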
+datasources.section.source-salesloft.client_id.title=Client ID
+datasources.section.source-salesloft.client_secret.title=Client Secret
+datasources.section.source-salesloft.refresh_token.title=Refresh Token
+datasources.section.source-salesloft.start_date.title=Start Date
+datasources.section.source-salesloft.client_id.description=The Client ID of your Salesloft developer application.
+datasources.section.source-salesloft.client_secret.description=The Client Secret of your Salesloft developer application.
+datasources.section.source-salesloft.refresh_token.description=The token for obtaining a new access token.
+datasources.section.source-salesloft.start_date.description=The date from which you'd like to replicate data for Salesloft API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.
+datasources.section.source-search-metrics.api_key.title=API Key
+datasources.section.source-search-metrics.client_secret.title=Client Secret
+datasources.section.source-search-metrics.country_code.title=Country Code
+datasources.section.source-search-metrics.start_date.title=Start Date
+datasources.section.source-search-metrics.api_key.description=
+datasources.section.source-search-metrics.client_secret.description=
+datasources.section.source-search-metrics.country_code.description=The region of the S3 staging bucket to use if utilising a copy strategy.
+datasources.section.source-search-metrics.start_date.description=Data generated in SearchMetrics after this date will be replicated. This date must be specified in the format YYYY-MM-DDT00:00:00Z.
+datasources.section.source-sendgrid.apikey.title=Sendgrid API key
+datasources.section.source-sendgrid.start_time.title=Start time
+datasources.section.source-sendgrid.apikey.description=API Key, use admin to generate this key.
+datasources.section.source-sendgrid.start_time.description=Start time in timestamp integer format. Any data before this timestamp will not be replicated.
+datasources.section.source-sentry.auth_token.title=Authentication Tokens
+datasources.section.source-sentry.hostname.title=Host Name
+datasources.section.source-sentry.organization.title=Organization
+datasources.section.source-sentry.project.title=Project
+datasources.section.source-sentry.auth_token.description=Log into Sentry and then create authentication tokens. For self-hosted, you can find or create authentication tokens by visiting "{instance_url_prefix}/settings/account/api/auth-tokens/"
+datasources.section.source-sentry.hostname.description=Host name of Sentry API server. For self-hosted, specify your host name here. Otherwise, leave it empty.
+datasources.section.source-sentry.organization.description=The slug of the organization the groups belong to.
+datasources.section.source-sentry.project.description=The name (slug) of the Project you want to sync.
+datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.title=Password
+datasources.section.source-sftp.credentials.oneOf.0.title=Password Authentication
+datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.title=SSH Private Key
+datasources.section.source-sftp.credentials.oneOf.1.title=SSH Key Authentication
+datasources.section.source-sftp.credentials.title=Authentication *
+datasources.section.source-sftp.file_pattern.title=File Pattern (Optional)
+datasources.section.source-sftp.file_types.title=File types
+datasources.section.source-sftp.folder_path.title=Folder Path (Optional)
+datasources.section.source-sftp.host.title=Host Address
+datasources.section.source-sftp.port.title=Port
+datasources.section.source-sftp.user.title=User Name
+datasources.section.source-sftp.credentials.description=The server authentication method
+datasources.section.source-sftp.credentials.oneOf.0.properties.auth_method.description=Connect through password authentication
+datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.description=OS-level password for logging into the jump server host
+datasources.section.source-sftp.credentials.oneOf.1.properties.auth_method.description=Connect through SSH key
+datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.description=OS-level user account SSH key credentials in RSA PEM format (created with ssh-keygen -t rsa -m PEM -f myuser_rsa)
+datasources.section.source-sftp.file_pattern.description=The regular expression to specify files for sync in a chosen Folder Path
+datasources.section.source-sftp.file_types.description=Comma-separated file types. Currently only 'csv' and 'json' types are supported.
+datasources.section.source-sftp.folder_path.description=The directory to search files for sync
+datasources.section.source-sftp.host.description=The server host address
+datasources.section.source-sftp.port.description=The server port
+datasources.section.source-sftp.user.description=The server user
+datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-shopify.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.title=API Password
+datasources.section.source-shopify.credentials.oneOf.1.title=API Password
+datasources.section.source-shopify.credentials.title=Shopify Authorization Method
+datasources.section.source-shopify.shop.title=Shopify Store
+datasources.section.source-shopify.start_date.title=Replication Start Date
+datasources.section.source-shopify.credentials.description=The authorization method to use to retrieve data from Shopify
+datasources.section.source-shopify.credentials.oneOf.0.description=OAuth2.0
+datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.description=The Access Token for making authenticated requests.
+datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.description=The Client ID of the Shopify developer application.
+datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the Shopify developer application.
+datasources.section.source-shopify.credentials.oneOf.1.description=API Password Auth +datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.description=The API Password for your private application in the `Shopify` store. +datasources.section.source-shopify.shop.description=The name of your Shopify store found in the URL. For example, if your URL was https://NAME.myshopify.com, then the name would be 'NAME'. +datasources.section.source-shopify.start_date.description=The date you would like to replicate data from. Format: YYYY-MM-DD. Any data before this date will not be replicated. +datasources.section.source-shortio.domain_id.title=Domain ID +datasources.section.source-shortio.secret_key.title=Secret Key +datasources.section.source-shortio.start_date.title=Start Date +datasources.section.source-shortio.secret_key.description=Short.io Secret Key +datasources.section.source-shortio.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-slack.channel_filter.title=Channel name filter +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.title=Access token +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-slack.credentials.oneOf.0.title=Sign in via Slack (OAuth) +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-slack.credentials.oneOf.1.title=API Token +datasources.section.source-slack.credentials.title=Authentication mechanism +datasources.section.source-slack.join_channels.title=Join all channels +datasources.section.source-slack.lookback_window.title=Threads Lookback window (Days) +datasources.section.source-slack.start_date.title=Start Date +datasources.section.source-slack.channel_filter.description=A channel name list (without leading '#' char) which limit the channels from which you'd like to sync. Empty list means no filter. +datasources.section.source-slack.credentials.description=Choose how to authenticate into Slack +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.description=Slack access_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.description=Slack client_id. See our docs if you need help finding this id. +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.description=Slack client_secret. See our docs if you need help finding this secret. +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.description=Slack refresh_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.description=A Slack bot token. See the docs for instructions on how to generate it. +datasources.section.source-slack.join_channels.description=Whether to join all channels or to sync data only from channels the bot is already in. If false, you'll need to manually add the bot to all the channels from which you'd like to sync messages. +datasources.section.source-slack.lookback_window.description=How far into the past to look for messages in threads. 
+datasources.section.source-slack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-smartsheets.access_token.title=Access Token +datasources.section.source-smartsheets.spreadsheet_id.title=Sheet ID +datasources.section.source-smartsheets.start_datetime.title=Start Datetime (Optional) +datasources.section.source-smartsheets.access_token.description=The access token to use for accessing your data from Smartsheets. This access token must be generated by a user with at least read access to the data you'd like to replicate. Generate an access token in the Smartsheets main menu by clicking Account > Apps & Integrations > API Access. See the setup guide for information on how to obtain this token. +datasources.section.source-smartsheets.spreadsheet_id.description=The spreadsheet ID. Find it by opening the spreadsheet then navigating to File > Properties +datasources.section.source-smartsheets.start_datetime.description=Only rows modified after this date/time will be replicated. This should be an ISO 8601 string, for instance: `2000-01-01T13:00:00` +datasources.section.source-snapchat-marketing.client_id.title=Client ID +datasources.section.source-snapchat-marketing.client_secret.title=Client Secret +datasources.section.source-snapchat-marketing.end_date.title=End Date (Optional) +datasources.section.source-snapchat-marketing.refresh_token.title=Refresh Token +datasources.section.source-snapchat-marketing.start_date.title=Start Date +datasources.section.source-snapchat-marketing.client_id.description=The Client ID of your Snapchat developer application. +datasources.section.source-snapchat-marketing.client_secret.description=The Client Secret of your Snapchat developer application. +datasources.section.source-snapchat-marketing.end_date.description=Date in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-snapchat-marketing.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-snapchat-marketing.start_date.description=Date in the format 2022-01-01. Any data before this date will not be replicated. 
+datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.title=Password +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.title=Username +datasources.section.source-snowflake.credentials.oneOf.1.title=Username and Password +datasources.section.source-snowflake.credentials.title=Authorization Method +datasources.section.source-snowflake.database.title=Database +datasources.section.source-snowflake.host.title=Account Name +datasources.section.source-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.source-snowflake.role.title=Role +datasources.section.source-snowflake.schema.title=Schema +datasources.section.source-snowflake.warehouse.title=Warehouse +datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.description=The Client ID of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.description=Refresh Token for making authenticated requests. +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.description=The password associated with the username. +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.description=The username you created to allow Airbyte to access the database. +datasources.section.source-snowflake.database.description=The database you created for Airbyte to access data. +datasources.section.source-snowflake.host.description=The host domain of the snowflake instance (must include the account, region, cloud environment, and end with snowflakecomputing.com). +datasources.section.source-snowflake.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-snowflake.role.description=The role you created for Airbyte to access Snowflake. +datasources.section.source-snowflake.schema.description=The source Snowflake schema tables. +datasources.section.source-snowflake.warehouse.description=The warehouse you created for Airbyte to access data. 
+datasources.section.source-square.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-square.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-square.credentials.oneOf.0.title=OAuth authentication
+datasources.section.source-square.credentials.oneOf.1.properties.api_key.title=API key token
+datasources.section.source-square.credentials.oneOf.1.title=API Key
+datasources.section.source-square.credentials.title=Credential Type
+datasources.section.source-square.include_deleted_objects.title=Include Deleted Objects
+datasources.section.source-square.is_sandbox.title=Sandbox
+datasources.section.source-square.start_date.title=Start Date
+datasources.section.source-square.credentials.oneOf.0.properties.client_id.description=The Square-issued ID of your application
+datasources.section.source-square.credentials.oneOf.0.properties.client_secret.description=The Square-issued application secret for your application
+datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.description=A refresh token generated using the above client ID and secret
+datasources.section.source-square.credentials.oneOf.1.properties.api_key.description=The API key for a Square application
+datasources.section.source-square.include_deleted_objects.description=In some streams there is an option to include deleted objects (Items, Categories, Discounts, Taxes)
+datasources.section.source-square.is_sandbox.description=Determines whether to use the sandbox or production environment.
+datasources.section.source-square.start_date.description=UTC date in the format YYYY-MM-DD. Any data before this date will not be replicated. If not set, all data will be replicated.
+datasources.section.source-strava.athlete_id.title=Athlete ID
+datasources.section.source-strava.client_id.title=Client ID
+datasources.section.source-strava.client_secret.title=Client Secret
+datasources.section.source-strava.refresh_token.title=Refresh Token
+datasources.section.source-strava.start_date.title=Start Date
+datasources.section.source-strava.athlete_id.description=The Athlete ID of your Strava developer application.
+datasources.section.source-strava.client_id.description=The Client ID of your Strava developer application.
+datasources.section.source-strava.client_secret.description=The Client Secret of your Strava developer application.
+datasources.section.source-strava.refresh_token.description=The Refresh Token with the activity: read_all permissions.
+datasources.section.source-strava.start_date.description=UTC date and time. Any data before this date will not be replicated.
+datasources.section.source-surveymonkey.access_token.title=Access Token
+datasources.section.source-surveymonkey.start_date.title=Start Date
+datasources.section.source-surveymonkey.survey_ids.title=Survey Monkey survey IDs
+datasources.section.source-surveymonkey.access_token.description=Access Token for making authenticated requests. See the docs for information on how to generate this key.
+datasources.section.source-surveymonkey.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-surveymonkey.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated.
+datasources.section.source-talkdesk-explore.api_key.title=API KEY +datasources.section.source-talkdesk-explore.auth_url.title=AUTH URL +datasources.section.source-talkdesk-explore.start_date.title=START DATE +datasources.section.source-talkdesk-explore.timezone.title=TIMEZONE +datasources.section.source-talkdesk-explore.api_key.description=Talkdesk API key. +datasources.section.source-talkdesk-explore.auth_url.description=Talkdesk Auth URL. Only 'client_credentials' auth type supported at the moment. +datasources.section.source-talkdesk-explore.start_date.description=The date from which you'd like to replicate data for Talkdesk Explore API, in the format YYYY-MM-DDT00:00:00. All data generated after this date will be replicated. +datasources.section.source-talkdesk-explore.timezone.description=Timezone to use when generating reports. Only IANA timezones are supported (https://nodatime.org/TimeZones) +datasources.section.source-tempo.api_token.title=API token +datasources.section.source-tempo.api_token.description=Tempo API Token. Go to Tempo>Settings, scroll down to Data Access and select API integration. +datasources.section.source-tidb.database.title=Database +datasources.section.source-tidb.host.title=Host +datasources.section.source-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.source-tidb.password.title=Password +datasources.section.source-tidb.port.title=Port +datasources.section.source-tidb.ssl.title=SSL Connection +datasources.section.source-tidb.username.title=Username +datasources.section.source-tidb.database.description=Name of the database. +datasources.section.source-tidb.host.description=Hostname of the database. +datasources.section.source-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3) +datasources.section.source-tidb.password.description=Password associated with the username. +datasources.section.source-tidb.port.description=Port of the database. +datasources.section.source-tidb.ssl.description=Encrypt data using SSL. +datasources.section.source-tidb.username.description=Username to use to access the database. +datasources.section.source-timely.account_id.title=account_id +datasources.section.source-timely.bearer_token.title=Bearer token +datasources.section.source-timely.start_date.title=startDate +datasources.section.source-timely.account_id.description=Timely account id +datasources.section.source-timely.bearer_token.description=Timely bearer token +datasources.section.source-timely.start_date.description=start date +datasources.section.source-tplcentral.client_id.title=Client ID +datasources.section.source-tplcentral.client_secret.title=Client secret +datasources.section.source-tplcentral.customer_id.title=Customer ID +datasources.section.source-tplcentral.facility_id.title=Facility ID +datasources.section.source-tplcentral.start_date.title=Start date +datasources.section.source-tplcentral.tpl_key.title=3PL GUID +datasources.section.source-tplcentral.url_base.title=URL base +datasources.section.source-tplcentral.user_login.title=User login name +datasources.section.source-tplcentral.user_login_id.title=User login ID +datasources.section.source-tplcentral.start_date.description=Date and time together in RFC 3339 format, for example, 2018-11-13T20:20:39+00:00. 
+datasources.section.source-tplcentral.user_login.description=User login ID and/or name is required
+datasources.section.source-tplcentral.user_login_id.description=User login ID and/or name is required
+datasources.section.source-trello.board_ids.title=Trello Board IDs
+datasources.section.source-trello.key.title=API key
+datasources.section.source-trello.start_date.title=Start Date
+datasources.section.source-trello.token.title=API token
+datasources.section.source-trello.board_ids.description=IDs of the boards to replicate data from. If left empty, data from all boards to which you have access will be replicated.
+datasources.section.source-trello.key.description=Trello API key. See the docs for instructions on how to generate it.
+datasources.section.source-trello.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-trello.token.description=Trello API token. See the docs for instructions on how to generate it.
+datasources.section.source-twilio.account_sid.title=Account ID
+datasources.section.source-twilio.auth_token.title=Auth Token
+datasources.section.source-twilio.lookback_window.title=Lookback window
+datasources.section.source-twilio.start_date.title=Replication Start Date
+datasources.section.source-twilio.account_sid.description=Twilio account SID
+datasources.section.source-twilio.auth_token.description=Twilio Auth Token.
+datasources.section.source-twilio.lookback_window.description=How far into the past to look for records (in minutes).
+datasources.section.source-twilio.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-typeform.form_ids.title=Form IDs to replicate
+datasources.section.source-typeform.start_date.title=Start Date
+datasources.section.source-typeform.token.title=API Token
+datasources.section.source-typeform.form_ids.description=When this parameter is set, the connector will replicate data only from the input forms. Otherwise, all forms in your Typeform account will be replicated. You can find form IDs in your form URLs. For example, in the URL "https://mysite.typeform.com/to/u6nXL7" the form_id is u6nXL7. You can find form URLs on the Share panel
+datasources.section.source-typeform.start_date.description=UTC date and time in the format: YYYY-MM-DDTHH:mm:ss[Z]. Any data before this date will not be replicated.
+datasources.section.source-typeform.token.description=The API Token for a Typeform account.
+datasources.section.source-us-census.api_key.description=Your API Key. Get your key here.
+datasources.section.source-us-census.query_params.description=The query parameters portion of the GET request, without the API key
+datasources.section.source-us-census.query_path.description=The path portion of the GET request
+datasources.section.source-woocommerce.conversion_window_days.title=Conversion Window (Optional)
+datasources.section.source-woocommerce.api_key.description=The CUSTOMER KEY for API in WooCommerce shop.
+datasources.section.source-woocommerce.api_secret.description=The CUSTOMER SECRET for API in WooCommerce shop.
+datasources.section.source-woocommerce.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads.
+datasources.section.source-woocommerce.shop.description=The name of the store. For https://EXAMPLE.com, the shop name is 'EXAMPLE.com'.
+datasources.section.source-woocommerce.start_date.description=The date you would like to replicate data from. Format: YYYY-MM-DD.
+datasources.section.source-yahoo-finance-price.interval.title=Interval
+datasources.section.source-yahoo-finance-price.range.title=Range
+datasources.section.source-yahoo-finance-price.interval.description=The interval between prices queried.
+datasources.section.source-yahoo-finance-price.range.description=The range of prices to be queried.
+datasources.section.source-yahoo-finance-price.tickers.description=Comma-separated identifiers for the stocks to be queried. Whitespaces are allowed.
+datasources.section.source-youtube-analytics.credentials.properties.client_id.title=Client ID
+datasources.section.source-youtube-analytics.credentials.properties.client_secret.title=Client Secret
+datasources.section.source-youtube-analytics.credentials.properties.refresh_token.title=Refresh Token
+datasources.section.source-youtube-analytics.credentials.title=Authenticate via OAuth 2.0
+datasources.section.source-youtube-analytics.credentials.properties.client_id.description=The Client ID of your developer application
+datasources.section.source-youtube-analytics.credentials.properties.client_secret.description=The client secret of your developer application
+datasources.section.source-youtube-analytics.credentials.properties.refresh_token.description=A refresh token generated using the above client ID and secret
+datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-zendesk-chat.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.title=Access Token
+datasources.section.source-zendesk-chat.credentials.oneOf.1.title=Access Token
+datasources.section.source-zendesk-chat.credentials.title=Authorization Method
+datasources.section.source-zendesk-chat.start_date.title=Start Date
+datasources.section.source-zendesk-chat.subdomain.title=Subdomain (Optional)
+datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application
+datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain a new Access Token when it has expired.
+datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests.
+datasources.section.source-zendesk-chat.start_date.description=The date from which you'd like to replicate data for Zendesk Chat API, in the format YYYY-MM-DDT00:00:00Z.
+datasources.section.source-zendesk-chat.subdomain.description=Required if you access Zendesk Chat from a Zendesk Support subdomain.
+datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-sunshine.credentials.title=Authorization Method +datasources.section.source-zendesk-sunshine.start_date.title=Start Date +datasources.section.source-zendesk-sunshine.subdomain.title=Subdomain +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.description=Long-term access Token for making authenticated requests. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.description=API Token. See the docs for information on how to generate this key. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account +datasources.section.source-zendesk-sunshine.start_date.description=The date from which you'd like to replicate data for Zendesk Sunshine API, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-zendesk-sunshine.subdomain.description=The subdomain for your Zendesk Account. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-support.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-support.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-support.credentials.title=Authentication * +datasources.section.source-zendesk-support.start_date.title=Start Date +datasources.section.source-zendesk-support.subdomain.title=Subdomain +datasources.section.source-zendesk-support.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-support.start_date.description=The date from which you'd like to replicate data for Zendesk Support API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. 
+datasources.section.source-zendesk-support.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.title=Email +datasources.section.source-zendesk-talk.credentials.oneOf.0.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.title=OAuth2.0 +datasources.section.source-zendesk-talk.credentials.title=Authentication +datasources.section.source-zendesk-talk.start_date.title=Start Date +datasources.section.source-zendesk-talk.subdomain.title=Subdomain +datasources.section.source-zendesk-talk.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.start_date.description=The date from which you'd like to replicate data for Zendesk Talk API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-zendesk-talk.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zenloop.api_token.description=Zenloop API Token. You can get the API token in settings page here +datasources.section.source-zenloop.date_from.description=Zenloop date_from. Format: 2021-10-24T03:30:30Z or 2021-10-24. Leave empty if only data from current data should be synced +datasources.section.source-zenloop.survey_group_id.description=Zenloop Survey Group ID. Can be found by pulling All Survey Groups via SurveyGroups stream. Leave empty to pull answers from all survey groups +datasources.section.source-zenloop.survey_id.description=Zenloop Survey ID. Can be found here. Leave empty to pull answers from all surveys +datasources.section.source-zoho-crm.client_id.title=Client ID +datasources.section.source-zoho-crm.client_secret.title=Client Secret +datasources.section.source-zoho-crm.dc_region.title=Data Center Location +datasources.section.source-zoho-crm.edition.title=Zoho CRM Edition +datasources.section.source-zoho-crm.environment.title=Environment +datasources.section.source-zoho-crm.refresh_token.title=Refresh Token +datasources.section.source-zoho-crm.start_datetime.title=Start Date +datasources.section.source-zoho-crm.client_id.description=OAuth2.0 Client ID +datasources.section.source-zoho-crm.client_secret.description=OAuth2.0 Client Secret +datasources.section.source-zoho-crm.dc_region.description=Please choose the region of your Data Center location. 
More info by this Link
+datasources.section.source-zoho-crm.edition.description=Choose your Edition of Zoho CRM to determine API Concurrency Limits
+datasources.section.source-zoho-crm.environment.description=Please choose the environment
+datasources.section.source-zoho-crm.refresh_token.description=OAuth2.0 Refresh Token
+datasources.section.source-zoho-crm.start_datetime.description=ISO 8601, for instance: `YYYY-MM-DD`, `YYYY-MM-DD HH:MM:SS+HH:MM`
+datasources.section.source-zoom-singer.jwt.title=JWT Token
+datasources.section.source-zoom-singer.jwt.description=Zoom JWT Token. See the docs for more information on how to obtain this key.
+datasources.section.source-zuora.client_id.title=Client ID
+datasources.section.source-zuora.client_secret.title=Client Secret
+datasources.section.source-zuora.data_query.title=Data Query Type
+datasources.section.source-zuora.start_date.title=Start Date
+datasources.section.source-zuora.tenant_endpoint.title=Tenant Endpoint Location
+datasources.section.source-zuora.window_in_days.title=Query Window (in days)
+datasources.section.source-zuora.client_id.description=Your OAuth user Client ID
+datasources.section.source-zuora.client_secret.description=Your OAuth user Client Secret
+datasources.section.source-zuora.data_query.description=Choose between `Live` or `Unlimited` - the optimized, replicated database at 12 hours freshness for high volume extraction Link
+datasources.section.source-zuora.start_date.description=Start Date in format: YYYY-MM-DD
+datasources.section.source-zuora.tenant_endpoint.description=Please choose the right endpoint where your Tenant is located. More info by this Link
+datasources.section.source-zuora.window_in_days.description=The number of days in each data chunk, beginning from start_date. The bigger the value, the faster the fetch (0.1 - a couple of hours, 1 - a day, 364 - a year).
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. May be left empty, in which case a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this setting you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
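To make the Standard Inserts vs. GCS Staging description above more concrete, a GCS Staging loading_method fragment might look roughly like the following. The bucket name, path, and HMAC values are placeholders, any selector field the UI adds for the oneOf choice is omitted, and only property names that appear in the descriptions above are used.

```
{
  "loading_method": {
    "gcs_bucket_name": "my-staging-bucket",
    "gcs_bucket_path": "airbyte/staging",
    "keep_files_in_gcs-bucket": "Delete all tmp files from GCS",
    "credential": {
      "hmac_key_access_id": "GOOG1EXAMPLEACCESSID",
      "hmac_key_secret": "examplebase64secret0000000000000000000"
    }
  }
}
```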
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method *
+datasources.section.destination-bigquery-denormalized.project_id.title=Project ID
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this setting you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-cassandra.address.title=Address
+datasources.section.destination-cassandra.datacenter.title=Datacenter
+datasources.section.destination-cassandra.keyspace.title=Keyspace
+datasources.section.destination-cassandra.password.title=Password
+datasources.section.destination-cassandra.port.title=Port
+datasources.section.destination-cassandra.replication.title=Replication factor
+datasources.section.destination-cassandra.username.title=Username
+datasources.section.destination-cassandra.address.description=Address to connect to.
+datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster.
+datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in.
+datasources.section.destination-cassandra.password.description=Password associated with Cassandra.
+datasources.section.destination-cassandra.port.description=Port of Cassandra.
+datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s). +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key ID to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys. 
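For context on the Elasticsearch authenticationMethod options above, a minimal sketch of how the two credentialed choices are usually presented to an Elasticsearch endpoint. The endpoint and credential variables are placeholders; the "ApiKey" header format (base64 of id:secret) is standard Elasticsearch behaviour, not something defined by this properties file:

```bash
ES_ENDPOINT="https://es.example.com:9200"   # placeholder Server Endpoint

# Api Key/Secret option: API Key ID + API Key Secret
curl -H "Authorization: ApiKey $(printf '%s:%s' "$API_KEY_ID" "$API_KEY_SECRET" | base64)" "$ES_ENDPOINT"

# Username/Password option: standard basic-auth header
curl -u "$ES_USERNAME:$ES_PASSWORD" "$ES_ENDPOINT"
```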
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to log in to. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target Firestore database. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is a waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
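The Parquet defaults quoted in the destination-gcs descriptions above (block size, page size, dictionary page size, padding, dictionary encoding) can be read together as one format block. A minimal sketch using only those documented defaults; the JSON field names mirror the property keys shown here, and the "format_type" value is an assumption rather than something stated in this file:

```bash
# Sketch of a GCS destination Parquet format block built from the defaults above.
cat <<'EOF'
{
  "format_type": "Parquet",
  "block_size_mb": 128,
  "page_size_kb": 1024,
  "dictionary_page_size_kb": 1024,
  "dictionary_encoding": true,
  "max_padding_size_mb": 8
}
EOF
```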
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for the current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique for each account, also known as a "recovery phrase". 
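The topic_pattern descriptions above (Kafka here, with the MQTT and Pulsar equivalents below) all use the same '{namespace}'/'{stream}' placeholder scheme. A quick illustration with a made-up pattern and made-up namespace/stream values, before any of the additional name normalization those descriptions mention:

```bash
# Hypothetical pattern and values; only the placeholder substitution is being shown.
TOPIC_PATTERN="{namespace}.{stream}.airbyte"
echo "$TOPIC_PATTERN" | sed -e 's/{namespace}/public/' -e 's/{stream}/users/'
# -> public.users.airbyte
```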
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not add it, the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not add it, the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=Whether the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer. +datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages. +datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions. +datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name. +datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar. +datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms). +datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces. +datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and are spread across clusters. +datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk. +datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-rabbitmq.exchange.description=The exchange name. +datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name. +datasources.section.destination-rabbitmq.password.description=The password to connect. +datasources.section.destination-rabbitmq.port.description=The RabbitMQ port. +datasources.section.destination-rabbitmq.routing_key.description=The routing key. +datasources.section.destination-rabbitmq.ssl.description=SSL enabled. +datasources.section.destination-rabbitmq.username.description=The username to connect. +datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name. +datasources.section.destination-redis.cache_type.title=Cache type +datasources.section.destination-redis.host.title=Host +datasources.section.destination-redis.password.title=Password +datasources.section.destination-redis.port.title=Port +datasources.section.destination-redis.username.title=Username +datasources.section.destination-redis.cache_type.description=Redis cache type to store data in. +datasources.section.destination-redis.host.description=Redis host to connect to. +datasources.section.destination-redis.password.description=Password associated with Redis. +datasources.section.destination-redis.port.description=Port of Redis. 
+datasources.section.destination-redis.username.description=Username associated with Redis. +datasources.section.destination-redshift.database.title=Database +datasources.section.destination-redshift.host.title=Host +datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-redshift.password.title=Password +datasources.section.destination-redshift.port.title=Port +datasources.section.destination-redshift.schema.title=Default Schema +datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key +datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging +datasources.section.destination-redshift.uploading_method.title=Uploading Method +datasources.section.destination-redshift.username.title=Username +datasources.section.destination-redshift.database.description=Name of the database. +datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com) +datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-redshift.password.description=Password associated with the username. +datasources.section.destination-redshift.port.description=Port of the database. +datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-redshift.uploading_method.description=The method used to upload data to the database. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key. 
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-rockset.api_key.title=Api Key +datasources.section.destination-rockset.api_server.title=Api Server +datasources.section.destination-rockset.workspace.title=Workspace +datasources.section.destination-rockset.api_key.description=Rockset api key +datasources.section.destination-rockset.api_server.description=Rockset api URL +datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created + written to. 
+datasources.section.destination-s3.access_key_id.title=S3 Key ID * +datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec * +datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage 
+datasources.section.destination-s3.format.title=Output Format * +datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional) +datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional) +datasources.section.destination-s3.secret_access_key.title=S3 Access Key * +datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here. +datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-s3.format.description=Format of the data output. See here for more details +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. 
This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here. +datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here +datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes. +datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here +datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here +datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here +datasources.section.destination-scylla.address.title=Address +datasources.section.destination-scylla.keyspace.title=Keyspace +datasources.section.destination-scylla.password.title=Password +datasources.section.destination-scylla.port.title=Port +datasources.section.destination-scylla.replication.title=Replication factor +datasources.section.destination-scylla.username.title=Username +datasources.section.destination-scylla.address.description=Address to connect to. +datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in. +datasources.section.destination-scylla.password.description=Password associated with Scylla. +datasources.section.destination-scylla.port.description=Port of Scylla. +datasources.section.destination-scylla.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-scylla.username.description=Username to use to access Scylla. +datasources.section.destination-sftp-json.destination_path.title=Destination path +datasources.section.destination-sftp-json.host.title=Host +datasources.section.destination-sftp-json.password.title=Password +datasources.section.destination-sftp-json.port.title=Port +datasources.section.destination-sftp-json.username.title=User +datasources.section.destination-sftp-json.destination_path.description=Path to the directory where json files will be written. +datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server. +datasources.section.destination-sftp-json.password.description=Password associated with the username. +datasources.section.destination-sftp-json.port.description=Port of the SFTP server. +datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server. 
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional) +datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password +datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password +datasources.section.destination-snowflake.credentials.title=Authorization Method +datasources.section.destination-snowflake.database.title=Database +datasources.section.destination-snowflake.host.title=Host +datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging +datasources.section.destination-snowflake.loading_method.title=Data Staging Method +datasources.section.destination-snowflake.role.title=Role +datasources.section.destination-snowflake.schema.title=Default Schema +datasources.section.destination-snowflake.username.title=Username +datasources.section.destination-snowflake.warehouse.title=Warehouse +datasources.section.destination-snowflake.credentials.description= +datasources.section.destination-snowflake.credentials.description= +datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key. +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username. +datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into +datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format ...snowflakecomputing.com) +datasources.section.destination-snowflake.jdbc_url_params.description=Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &).
Example: key1=value1&key2=value2&key3=value3 +datasources.section.destination-snowflake.loading_method.description=Select a data staging method +datasources.section.destination-snowflake.loading_method.oneOf.0.description=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.description=Enter your AWS access key ID. Airbyte requires Read and Write permissions on your S3 bucket +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.description=Choose a data encryption method for the staging data +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.description=Toggle to delete staging files from the S3 bucket after a successful sync +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.description=Enter your S3 bucket name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.description=Enter the region where your S3 bucket resides +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.description=Enter your AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.3.description=Recommended for large production workloads for better speed and scalability. 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.description=Enter the Cloud Storage bucket name +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.description=Enter your Google Cloud service account key in the JSON format with read/write access to your Cloud Storage staging bucket +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.description=Enter the Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.4.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.description=Enter your Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.description=Enter your Azure Blob Storage container name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.description=Enter the Azure Blob Storage endpoint domain name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.description=Enter the Shared access signature (SAS) token to grant Snowflake limited access to objects in your Azure Blob Storage account +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.role.description=Enter the role that you want to use to access Snowflake +datasources.section.destination-snowflake.schema.description=Enter the name of the default schema +datasources.section.destination-snowflake.username.description=Enter the name of the user you want to use to access the database +datasources.section.destination-snowflake.warehouse.description=Enter the name of the warehouse that you want to sync data into +datasources.section.destination-sqlite.destination_path.description=Path to the sqlite.db file. The file will be placed inside that local mount. For more information check out our docs +datasources.section.destination-tidb.database.title=Database +datasources.section.destination-tidb.host.title=Host +datasources.section.destination-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-tidb.password.title=Password +datasources.section.destination-tidb.port.title=Port +datasources.section.destination-tidb.ssl.title=SSL Connection +datasources.section.destination-tidb.username.title=User +datasources.section.destination-tidb.database.description=Name of the database. +datasources.section.destination-tidb.host.description=Hostname of the database. +datasources.section.destination-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-tidb.password.description=Password associated with the username. +datasources.section.destination-tidb.port.description=Port of the database. 
+datasources.section.destination-tidb.ssl.description=Encrypt data using SSL. +datasources.section.destination-tidb.username.description=Username to use to access the database. +datasources.section.source-airtable.api_key.title=API Key +datasources.section.source-airtable.base_id.title=Base ID +datasources.section.source-airtable.tables.title=Tables +datasources.section.source-airtable.api_key.description=The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key. +datasources.section.source-airtable.base_id.description=The Base ID to integrate the data from. You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs. +datasources.section.source-airtable.tables.description=The list of Tables to integrate. +datasources.section.source-amazon-ads.auth_type.title=Auth Type +datasources.section.source-amazon-ads.client_id.title=Client ID +datasources.section.source-amazon-ads.client_secret.title=Client Secret +datasources.section.source-amazon-ads.profiles.title=Profile IDs (Optional) +datasources.section.source-amazon-ads.refresh_token.title=Refresh Token +datasources.section.source-amazon-ads.region.title=Region * +datasources.section.source-amazon-ads.report_generation_max_retries.title=Report Generation Maximum Retries * +datasources.section.source-amazon-ads.report_wait_timeout.title=Report Wait Timeout * +datasources.section.source-amazon-ads.start_date.title=Start Date (Optional) +datasources.section.source-amazon-ads.client_id.description=The client ID of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.client_secret.description=The client secret of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.profiles.description=Profile IDs you want to fetch data for. See docs for more details. +datasources.section.source-amazon-ads.refresh_token.description=Amazon Ads refresh token. See the docs for more information on how to obtain this token. +datasources.section.source-amazon-ads.region.description=Region to pull data from (EU/NA/FE). See docs for more details. +datasources.section.source-amazon-ads.report_generation_max_retries.description=Maximum retries Airbyte will attempt for fetching report data. Default is 5. +datasources.section.source-amazon-ads.report_wait_timeout.description=Timeout duration in minutes for Reports. Default is 30 minutes. +datasources.section.source-amazon-ads.start_date.description=The Start date for collecting reports, should not be more than 60 days in the past. 
In YYYY-MM-DD format +datasources.section.source-amazon-seller-partner.app_id.title=App Id * +datasources.section.source-amazon-seller-partner.auth_type.title=Auth Type +datasources.section.source-amazon-seller-partner.aws_access_key.title=AWS Access Key +datasources.section.source-amazon-seller-partner.aws_environment.title=AWSEnvironment +datasources.section.source-amazon-seller-partner.aws_secret_key.title=AWS Secret Access Key +datasources.section.source-amazon-seller-partner.lwa_app_id.title=LWA Client Id +datasources.section.source-amazon-seller-partner.lwa_client_secret.title=LWA Client Secret +datasources.section.source-amazon-seller-partner.max_wait_seconds.title=Max wait time for reports (in seconds) +datasources.section.source-amazon-seller-partner.period_in_days.title=Period In Days +datasources.section.source-amazon-seller-partner.refresh_token.title=Refresh Token +datasources.section.source-amazon-seller-partner.region.title=AWSRegion +datasources.section.source-amazon-seller-partner.replication_end_date.title=End Date +datasources.section.source-amazon-seller-partner.replication_start_date.title=Start Date +datasources.section.source-amazon-seller-partner.report_options.title=Report Options +datasources.section.source-amazon-seller-partner.role_arn.title=Role ARN +datasources.section.source-amazon-seller-partner.app_id.description=Your Amazon App ID +datasources.section.source-amazon-seller-partner.aws_access_key.description=Specifies the AWS access key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.aws_environment.description=An enumeration. +datasources.section.source-amazon-seller-partner.aws_secret_key.description=Specifies the AWS secret key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.lwa_app_id.description=Your Login with Amazon Client ID. +datasources.section.source-amazon-seller-partner.lwa_client_secret.description=Your Login with Amazon Client Secret. +datasources.section.source-amazon-seller-partner.max_wait_seconds.description=Sometimes report can take up to 30 minutes to generate. This will set the limit for how long to wait for a successful report. +datasources.section.source-amazon-seller-partner.period_in_days.description=Will be used for stream slicing for initial full_refresh sync when no updated state is present for reports that support sliced incremental sync. +datasources.section.source-amazon-seller-partner.refresh_token.description=The Refresh Token obtained via OAuth flow authorization. +datasources.section.source-amazon-seller-partner.region.description=An enumeration. +datasources.section.source-amazon-seller-partner.replication_end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. +datasources.section.source-amazon-seller-partner.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-amazon-seller-partner.report_options.description=Additional information passed to reports. This varies by report type. Must be a valid json string. +datasources.section.source-amazon-seller-partner.role_arn.description=Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations requested using this profile. (Needs permission to 'Assume Role' STS). 
+datasources.section.source-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.source-amazon-sqs.attributes_to_return.title=Message Attributes To Return +datasources.section.source-amazon-sqs.delete_messages.title=Delete Messages After Read +datasources.section.source-amazon-sqs.max_batch_size.title=Max Batch Size +datasources.section.source-amazon-sqs.max_wait_time.title=Max Wait Time +datasources.section.source-amazon-sqs.queue_url.title=Queue URL +datasources.section.source-amazon-sqs.region.title=AWS Region +datasources.section.source-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.source-amazon-sqs.visibility_timeout.title=Message Visibility Timeout +datasources.section.source-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.attributes_to_return.description=Comma-separated list of Message Attribute names to return +datasources.section.source-amazon-sqs.delete_messages.description=If Enabled, messages will be deleted from the SQS Queue after being read. If Disabled, messages are left in the queue and can be read more than once. WARNING: Enabling this option can result in data loss in cases of failure; use with caution and see the documentation for more detail. +datasources.section.source-amazon-sqs.max_batch_size.description=Maximum number of messages to get in one batch (10 max) +datasources.section.source-amazon-sqs.max_wait_time.description=Maximum amount of time in seconds to wait for messages in a single poll (20 max) +datasources.section.source-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.source-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.source-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.visibility_timeout.description=Modify the Visibility Timeout of the individual message from the Queue's default (seconds). +datasources.section.source-amplitude.api_key.title=API Key +datasources.section.source-amplitude.secret_key.title=Secret Key +datasources.section.source-amplitude.start_date.title=Replication Start Date +datasources.section.source-amplitude.api_key.description=Amplitude API Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.secret_key.description=Amplitude Secret Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-apify-dataset.clean.title=Clean +datasources.section.source-apify-dataset.datasetId.title=Dataset ID +datasources.section.source-apify-dataset.clean.description=If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false. +datasources.section.source-apify-dataset.datasetId.description=ID of the dataset you would like to load to Airbyte. +datasources.section.source-appsflyer.api_token.description=Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard. +datasources.section.source-appsflyer.app_id.description=App identifier as found in AppsFlyer.
+datasources.section.source-appsflyer.start_date.description=The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days. +datasources.section.source-appsflyer.timezone.description=Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console. +datasources.section.source-appstore-singer.issuer_id.title=Issuer ID +datasources.section.source-appstore-singer.key_id.title=Key ID +datasources.section.source-appstore-singer.private_key.title=Private Key +datasources.section.source-appstore-singer.start_date.title=Start Date +datasources.section.source-appstore-singer.vendor.title=Vendor ID +datasources.section.source-appstore-singer.issuer_id.description=Appstore Issuer ID. See the docs for more information on how to obtain this ID. +datasources.section.source-appstore-singer.key_id.description=Appstore Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.private_key.description=Appstore Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-appstore-singer.vendor.description=Appstore Vendor ID. See the docs for more information on how to obtain this ID. +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.title=Personal Access Token +datasources.section.source-asana.credentials.oneOf.0.title=Authenticate with Personal Access Token +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title= +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title= +datasources.section.source-asana.credentials.oneOf.1.title=Authenticate via Asana (OAuth) +datasources.section.source-asana.credentials.title=Authentication mechanism +datasources.section.source-asana.credentials.description=Choose how to authenticate to Asana +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.description=PAT Credentials +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.description=Asana Personal Access Token (generate yours here).
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.description=OAuth Credentials +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description= +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description= +datasources.section.source-aws-cloudtrail.aws_key_id.title=Key ID +datasources.section.source-aws-cloudtrail.aws_region_name.title=Region Name +datasources.section.source-aws-cloudtrail.aws_secret_key.title=Secret Key +datasources.section.source-aws-cloudtrail.start_date.title=Start Date +datasources.section.source-aws-cloudtrail.aws_key_id.description=AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.aws_region_name.description=The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name. +datasources.section.source-aws-cloudtrail.aws_secret_key.description=AWS CloudTrail Secret Key. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.start_date.description=The date you would like to replicate data. Data in AWS CloudTrail is available for the last 90 days only. Format: YYYY-MM-DD. +datasources.section.source-azure-table.storage_access_key.title=Access Key +datasources.section.source-azure-table.storage_account_name.title=Account Name +datasources.section.source-azure-table.storage_endpoint_suffix.title=Endpoint Suffix +datasources.section.source-azure-table.storage_access_key.description=Azure Table Storage Access Key. See the docs for more information on how to obtain this key. +datasources.section.source-azure-table.storage_account_name.description=The name of your storage account. +datasources.section.source-azure-table.storage_endpoint_suffix.description=Azure Table Storage service account URL suffix. See the docs for more information on how to obtain the endpoint suffix. +datasources.section.source-bamboo-hr.api_key.description=API key of BambooHR. +datasources.section.source-bamboo-hr.custom_reports_fields.description=Comma-separated list of fields to include in custom reports. +datasources.section.source-bamboo-hr.custom_reports_include_default_fields.description=If true, the custom reports endpoint will include the default fields defined here: https://documentation.bamboohr.com/docs/list-of-field-names. +datasources.section.source-bamboo-hr.subdomain.description=Subdomain of BambooHR. +datasources.section.source-bigcommerce.access_token.title=Access Token +datasources.section.source-bigcommerce.start_date.title=Start Date +datasources.section.source-bigcommerce.store_hash.title=Store Hash +datasources.section.source-bigcommerce.access_token.description=Access Token for making authenticated requests. +datasources.section.source-bigcommerce.start_date.description=The date you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-bigcommerce.store_hash.description=The hash code of the store. For https://api.bigcommerce.com/stores/HASH_CODE/v3/, the store's hash code is 'HASH_CODE'.
+datasources.section.source-bigquery.credentials_json.title=Credentials JSON +datasources.section.source-bigquery.dataset_id.title=Default Dataset ID +datasources.section.source-bigquery.project_id.title=Project ID +datasources.section.source-bigquery.credentials_json.description=The contents of your Service Account Key JSON file. See the docs for more information on how to obtain this key. +datasources.section.source-bigquery.dataset_id.description=The dataset ID to search for tables and views. If you are only loading data from one dataset, setting this option could result in much faster schema discovery. +datasources.section.source-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. +datasources.section.source-bing-ads.client_id.title=Client ID +datasources.section.source-bing-ads.client_secret.title=Client Secret +datasources.section.source-bing-ads.developer_token.title=Developer Token +datasources.section.source-bing-ads.refresh_token.title=Refresh Token +datasources.section.source-bing-ads.reports_start_date.title=Reports replication start date +datasources.section.source-bing-ads.tenant_id.title=Tenant ID +datasources.section.source-bing-ads.client_id.description=The Client ID of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.client_secret.description=The Client Secret of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.developer_token.description=Developer token associated with user. See more info in the docs. +datasources.section.source-bing-ads.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-bing-ads.reports_start_date.description=The start date from which to begin replicating report data. Any data generated before this date will not be replicated in reports. This is a UTC date in YYYY-MM-DD format. +datasources.section.source-bing-ads.tenant_id.description=The Tenant ID of your Microsoft Advertising developer application. Set this to "common" unless you know you need a different value. +datasources.section.source-braintree.environment.title=Environment +datasources.section.source-braintree.merchant_id.title=Merchant ID +datasources.section.source-braintree.private_key.title=Private Key +datasources.section.source-braintree.public_key.title=Public Key +datasources.section.source-braintree.start_date.title=Start Date +datasources.section.source-braintree.environment.description=Environment specifies where the data will come from. +datasources.section.source-braintree.merchant_id.description=The unique identifier for your entire gateway account. See the docs for more information on how to obtain this ID. +datasources.section.source-braintree.private_key.description=Braintree Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.public_key.description=Braintree Public Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-cart.access_token.title=Access Token +datasources.section.source-cart.start_date.title=Start Date +datasources.section.source-cart.store_name.title=Store Name +datasources.section.source-cart.access_token.description=Access Token for making authenticated requests. 
+datasources.section.source-cart.start_date.description=The date from which you'd like to replicate the data +datasources.section.source-cart.store_name.description=The name of Cart.com Online Store. All API URLs start with https://[mystorename.com]/api/v1/, where [mystorename.com] is the domain name of your store. +datasources.section.source-chargebee.product_catalog.title=Product Catalog +datasources.section.source-chargebee.site.title=Site +datasources.section.source-chargebee.site_api_key.title=API Key +datasources.section.source-chargebee.start_date.title=Start Date +datasources.section.source-chargebee.product_catalog.description=Product Catalog version of your Chargebee site. Instructions on how to find your version you may find here under `API Version` section. +datasources.section.source-chargebee.site.description=The site prefix for your Chargebee instance. +datasources.section.source-chargebee.site_api_key.description=Chargebee API Key. See the docs for more information on how to obtain this key. +datasources.section.source-chargebee.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-chargify.api_key.description=Chargify API Key. +datasources.section.source-chargify.domain.description=Chargify domain. Normally this domain follows the following format companyname.chargify.com +datasources.section.source-chartmogul.api_key.description=Chartmogul API key +datasources.section.source-chartmogul.interval.description=Some APIs such as Metrics require intervals to cluster data. +datasources.section.source-chartmogul.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. When feasible, any data before this date will not be replicated. +datasources.section.source-clickhouse.database.title=Database +datasources.section.source-clickhouse.host.title=Host +datasources.section.source-clickhouse.password.title=Password +datasources.section.source-clickhouse.port.title=Port +datasources.section.source-clickhouse.ssl.title=SSL Connection +datasources.section.source-clickhouse.username.title=Username +datasources.section.source-clickhouse.database.description=The name of the database. +datasources.section.source-clickhouse.host.description=The host endpoint of the Clickhouse cluster. +datasources.section.source-clickhouse.password.description=The password associated with this username. +datasources.section.source-clickhouse.port.description=The port of the database. +datasources.section.source-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.source-clickhouse.username.description=The username which is used to access the database. +datasources.section.source-close-com.api_key.description=Close.com API key (usually starts with 'api_'; find yours here). +datasources.section.source-close-com.start_date.description=The start date to sync data. Leave blank for full sync. Format: YYYY-MM-DD. +datasources.section.source-cockroachdb.database.title=DB Name +datasources.section.source-cockroachdb.host.title=Host +datasources.section.source-cockroachdb.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-cockroachdb.password.title=Password +datasources.section.source-cockroachdb.port.title=Port +datasources.section.source-cockroachdb.ssl.title=Connect using SSL +datasources.section.source-cockroachdb.username.title=User +datasources.section.source-cockroachdb.database.description=Name of the database. 
+datasources.section.source-cockroachdb.host.description=Hostname of the database. +datasources.section.source-cockroachdb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-cockroachdb.password.description=Password associated with the username. +datasources.section.source-cockroachdb.port.description=Port of the database. +datasources.section.source-cockroachdb.ssl.description=Encrypt client/server communications for increased security. +datasources.section.source-cockroachdb.username.description=Username to use to access the database. +datasources.section.source-commercetools.client_id.description=ID of the API Client. +datasources.section.source-commercetools.client_secret.description=The password/secret of the API Client. +datasources.section.source-commercetools.host.description=The cloud provider your shop is hosted on. See: https://docs.commercetools.com/api/authorization +datasources.section.source-commercetools.project_key.description=The project key. +datasources.section.source-commercetools.region.description=The region of the platform. +datasources.section.source-commercetools.start_date.description=The date you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-confluence.api_token.description=Please follow the Atlassian documentation for generating an API token: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/ +datasources.section.source-confluence.domain_name.description=Your Confluence domain name +datasources.section.source-confluence.email.description=Your Confluence login email +datasources.section.source-db2.encryption.oneOf.0.title=Unencrypted +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.title=Key Store Password. This field is optional. If you do not fill in this field, the password will be randomly generated. +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.title=SSL PEM file +datasources.section.source-db2.encryption.oneOf.1.title=TLS Encrypted (verify certificate) +datasources.section.source-db2.encryption.title=Encryption +datasources.section.source-db2.jdbc_url_params.title=JDBC URL Params +datasources.section.source-db2.db.description=Name of the database. +datasources.section.source-db2.encryption.description=Encryption method to use when communicating with the database +datasources.section.source-db2.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-db2.encryption.oneOf.1.description=Verify and use the cert provided by the server. +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.description=Key Store Password +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations +datasources.section.source-db2.host.description=Host of the Db2 database. +datasources.section.source-db2.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-db2.password.description=Password associated with the username.
+datasources.section.source-db2.port.description=Port of the database. +datasources.section.source-db2.username.description=Username to use to access the database. +datasources.section.source-delighted.api_key.title=Delighted API Key +datasources.section.source-delighted.since.title=Since +datasources.section.source-delighted.api_key.description=A Delighted API key. +datasources.section.source-delighted.since.description=The date from which you'd like to replicate the data +datasources.section.source-dixa.api_token.description=Dixa API token +datasources.section.source-dixa.batch_size.description=Number of days to batch into one request. Max 31. +datasources.section.source-dixa.start_date.description=The connector pulls records updated from this date onwards. +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-drift.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.1.title=Access Token +datasources.section.source-drift.credentials.title=Authorization Method +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.description=The Client ID of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.description=Drift Access Token. See the docs for more information on how to generate this key. 
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.source-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.source-elasticsearch.endpoint.title=Server Endpoint +datasources.section.source-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.source-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.source-facebook-marketing.access_token.title=Access Token +datasources.section.source-facebook-marketing.account_id.title=Account ID +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.title=ValidActionBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.title=Action Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.title=ValidBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.title=Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.title=End Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.title=ValidEnums +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.title=Fields +datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.title=Custom Insights Lookback Window +datasources.section.source-facebook-marketing.custom_insights.items.properties.name.title=Name +datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.title=Start Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.title=Time Increment
+datasources.section.source-facebook-marketing.custom_insights.items.title=InsightConfig +datasources.section.source-facebook-marketing.custom_insights.title=Custom Insights +datasources.section.source-facebook-marketing.end_date.title=End Date +datasources.section.source-facebook-marketing.fetch_thumbnail_images.title=Fetch Thumbnail Images +datasources.section.source-facebook-marketing.include_deleted.title=Include Deleted +datasources.section.source-facebook-marketing.insights_lookback_window.title=Insights Lookback Window +datasources.section.source-facebook-marketing.max_batch_size.title=Maximum size of Batched Requests +datasources.section.source-facebook-marketing.page_size.title=Page Size of Requests +datasources.section.source-facebook-marketing.start_date.title=Start Date +datasources.section.source-facebook-marketing.access_token.description=The value of the access token generated. See the docs for more information +datasources.section.source-facebook-marketing.account_id.description=The Facebook Ad account ID to use when pulling data from the Facebook Marketing API. +datasources.section.source-facebook-marketing.custom_insights.description=A list which contains insights entries; each entry must have a name and can contain fields, breakdowns or action_breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.description=Config for custom insights +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.description=A list of chosen action_breakdowns for action_breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.description=A list of chosen breakdowns for breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.description=The date until which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. All data generated between the start date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.description=A list of chosen fields for fields parameter +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.custom_insights.items.properties.name.description=The name value of the insight +datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.description=The date from which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.description=Time window in days by which to aggregate statistics. The sync will be chunked into N day intervals, where N is the number of days you specified. For example, if you set this value to 7, then all statistics will be reported as 7-day aggregates by starting from the start_date. 
If the start and end dates are October 1st and October 30th, then the connector will output 5 records: 01 - 06, 07 - 13, 14 - 20, 21 - 27, and 28 - 30 (3 days only). +datasources.section.source-facebook-marketing.end_date.description=The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.fetch_thumbnail_images.description=In each Ad Creative, fetch the thumbnail_url and store the result in thumbnail_data_url +datasources.section.source-facebook-marketing.include_deleted.description=Include data from deleted Campaigns, Ads, and AdSets +datasources.section.source-facebook-marketing.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.max_batch_size.description=Maximum batch size used when sending batch requests to Facebook API. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.page_size.description=Page size used when sending requests to Facebook API to specify number of records per page when response has pagination. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.start_date.description=The date from which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-facebook-pages.access_token.title=Page Access Token +datasources.section.source-facebook-pages.page_id.title=Page ID +datasources.section.source-facebook-pages.access_token.description=Facebook Page Access Token +datasources.section.source-facebook-pages.page_id.description=Page ID +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. May be left empty; in that case a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the Microsoft native endpoint from the example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporarily store records in the GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporarily store records in the GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key ID. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key ID to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key ID. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys. 
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is a waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for the dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses, the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern to which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow the connector to guess the keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for the current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=The connector will assume this role to write data to S3. +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key ID +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=The Lake Formation database to use. +datasources.section.destination-aws-datalake.region.description=AWS region name. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft domain shown in the example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
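To make the Azure Blob Storage account name, account key, and container name settings above concrete, a minimal sketch with the Azure CLI follows; the account and container names are hypothetical, and the destination can also create the container itself.

```
# Hypothetical account and container; the connector creates the container when it is missing.
az storage container create \
  --name airbytecontainer \
  --account-name mystorageaccount \
  --account-key "$AZURE_BLOB_STORAGE_ACCOUNT_KEY"
```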
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. Use this option to choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards the concurrent rate limit and the daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
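A hedged sketch of the GCS Staging prerequisites described above: an HMAC key tied to a service account plus a staging bucket and path. The project, service account, and bucket names are placeholders.

```
# Create an HMAC key (access ID + secret) for the service account the connector uses.
gsutil hmac create airbyte-staging@my-project.iam.gserviceaccount.com

# The staging bucket/path the connector would write its batch files into.
gsutil ls gs://my-gcs-bucket/airbyte-staging/
```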
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. Use this option to choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
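To ground the Cassandra address, port, keyspace, datacenter, and replication-factor settings above, here is a hypothetical cqlsh session; all names and values are placeholders.

```
# Connect with the address/port/credentials described above and create the default keyspace.
cqlsh 10.0.0.5 9042 -u airbyte -p 's3cret' \
  -e "CREATE KEYSPACE IF NOT EXISTS airbyte
      WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
```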
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key ID. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key ID to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The AWS region of DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key ID.
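As a quick, hypothetical sanity check of the DynamoDB region, endpoint, and key settings described above (tables written by the destination would carry the configured name prefix):

```
# Placeholder region/endpoint; drop --endpoint-url when targeting AWS-hosted DynamoDB.
aws dynamodb list-tables \
  --region us-west-2 \
  --endpoint-url https://dynamodb.us-west-2.amazonaws.com
```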
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc ID. Does not support composite primary keys.
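The two non-anonymous Elasticsearch authentication methods above map onto standard HTTP headers; a hedged curl sketch follows, with the endpoint and credentials as placeholders.

```
# API key/secret: Elasticsearch expects "ApiKey base64(<id>:<secret>)".
curl -H "Authorization: ApiKey $(printf '%s:%s' "$API_KEY_ID" "$API_KEY_SECRET" | base64)" \
  "https://es.example.com:9200/_cluster/health"

# Username/password: plain HTTP basic auth.
curl -u "username:password" "https://es.example.com:9200/_cluster/health"
```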
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=GCS Bucket Path string Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted url. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
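The Google Sheets client ID, client secret, and refresh token described above are the standard Google OAuth inputs for minting a new access token; a minimal curl sketch with placeholder values:

```
# Exchange the stored refresh token for a fresh access token.
curl -s https://oauth2.googleapis.com/token \
  -d client_id="$CLIENT_ID" \
  -d client_secret="$CLIENT_SECRET" \
  -d refresh_token="$REFRESH_TOKEN" \
  -d grant_type=refresh_token
```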
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
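Most of the Kafka settings titled above correspond one-to-one to standard Kafka producer configuration keys; the sketch below (placeholder brokers, credentials, and topic) shows a handful of them in a producer properties file used with the stock console producer.

```
cat > /tmp/producer.properties <<'EOF'
# Placeholder brokers and SASL credentials.
bootstrap.servers=broker1:9092,broker2:9092
acks=all
batch.size=16384
linger.ms=5
compression.type=gzip
security.protocol=SASL_SSL
sasl.mechanism=PLAIN
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="user" password="pass";
EOF

kafka-console-producer.sh --bootstrap-server broker1:9092 \
  --topic test_topic --producer.config /tmp/producer.properties
```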
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method *
+datasources.section.destination-bigquery-denormalized.project_id.title=Project ID
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-cassandra.address.title=Address
+datasources.section.destination-cassandra.datacenter.title=Datacenter
+datasources.section.destination-cassandra.keyspace.title=Keyspace
+datasources.section.destination-cassandra.password.title=Password
+datasources.section.destination-cassandra.port.title=Port
+datasources.section.destination-cassandra.replication.title=Replication factor
+datasources.section.destination-cassandra.username.title=Username
+datasources.section.destination-cassandra.address.description=Address to connect to.
+datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster.
+datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in.
+datasources.section.destination-cassandra.password.description=Password associated with Cassandra.
+datasources.section.destination-cassandra.port.description=Port of Cassandra.
+datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-clickhouse.database.title=DB Name
+datasources.section.destination-clickhouse.host.title=Host
+datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-clickhouse.password.title=Password
+datasources.section.destination-clickhouse.port.title=Port
+datasources.section.destination-clickhouse.ssl.title=SSL Connection
+datasources.section.destination-clickhouse.tcp-port.title=Native Port
+datasources.section.destination-clickhouse.username.title=User
+datasources.section.destination-clickhouse.database.description=Name of the database.
+datasources.section.destination-clickhouse.host.description=Hostname of the database.
+datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-clickhouse.password.description=Password associated with the username.
+datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database.
+datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL.
+datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database.
+datasources.section.destination-clickhouse.username.description=Username to use to access the database.
+datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key
+datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3
+datasources.section.destination-databricks.data_source.title=Data Source
+datasources.section.destination-databricks.database_schema.title=Database Schema
+datasources.section.destination-databricks.databricks_http_path.title=HTTP Path
+datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token
+datasources.section.destination-databricks.databricks_port.title=Port
+datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname
+datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector.
+datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id.
+datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path.
+datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests.
+datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port.
+datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname.
+datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes.
+datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id
+datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint
+datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region
+datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix
+datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key
+datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB.
+datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty).
+datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB.
+datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables.
+datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password
+datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method
+datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint
+datasources.section.destination-elasticsearch.upsert.title=Upsert Records
+datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to be used when accessing an enterprise Elasticsearch instance.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server
+datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server
+datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the elasticsearch doc id. Does not support composite primary keys.
+datasources.section.destination-firebolt.account.title=Account
+datasources.section.destination-firebolt.database.title=Database
+datasources.section.destination-firebolt.engine.title=Engine
+datasources.section.destination-firebolt.host.title=Host
+datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name
+datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3
+datasources.section.destination-firebolt.loading_method.title=Loading Method
+datasources.section.destination-firebolt.password.title=Password
+datasources.section.destination-firebolt.username.title=Username
+datasources.section.destination-firebolt.account.description=Firebolt account to log in to.
+datasources.section.destination-firebolt.database.description=The database to connect to.
+datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to.
+datasources.section.destination-firebolt.host.description=The host name of your Firebolt database.
+datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3.
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket.
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket.
+datasources.section.destination-firebolt.password.description=Firebolt password.
+datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in.
+datasources.section.destination-firestore.credentials_json.title=Credentials JSON
+datasources.section.destination-firestore.project_id.title=Project ID
+datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset.
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret
+datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key
+datasources.section.destination-gcs.credential.title=Authentication
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional)
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional)
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional)
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional)
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec
+datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional)
+datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-gcs.format.title=Output Format
+datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional)
+datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here.
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here.
+datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format.
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression.
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is a waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details.
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here.
+datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into.
+datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here.
+datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID
+datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret
+datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token
+datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth)
+datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting allow one to access the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Default to 'true'. Switch it to 'false' for debugging purpose. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint url.(if you are working with AWS DynamoDB, just leave empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys. 
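The destination-elasticsearch block above defines three authenticationMethod variants (none, API key/secret, username/password). A minimal sketch of the two credentialed variants, using only the field names that appear in the property keys above; endpoint and credential values are placeholders:

```
# Hypothetical destination-elasticsearch configs; field names assumed from the keys above.
cat <<'EOF' | jq .
[
  {
    "endpoint": "https://elasticsearch.example.com:9200",
    "upsert": true,
    "authenticationMethod": { "apiKeyId": "my-key-id", "apiKeySecret": "my-key-secret" }
  },
  {
    "endpoint": "https://elasticsearch.example.com:9200",
    "upsert": true,
    "authenticationMethod": { "username": "airbyte", "password": "changeme" }
  }
]
EOF
```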
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to log in to. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). 
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account ID. +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty when running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
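The destination-amazon-sqs.message_body_key description above says the named key is extracted from the input record and used as the SQS message body. A rough illustration of that extraction with jq, using a made-up record and key name:

```
# Hypothetical record; with message_body_key=comment only that field becomes the message body.
echo '{"id": 1, "comment": "hello from airbyte"}' | jq -r '.comment'
```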
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this setting you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
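The destination-bigquery loading_method keys above distinguish Standard Inserts from GCS Staging with an HMAC-key credential. A minimal sketch of the GCS Staging variant, assuming the spec fields carry the same names as the property keys above; project, bucket, and key values are placeholders:

```
# Hypothetical destination-bigquery config with GCS Staging; field names assumed from the keys above.
cat <<'EOF' | jq .
{
  "project_id": "my-gcp-project",
  "dataset_id": "airbyte_raw",
  "dataset_location": "US",
  "loading_method": {
    "gcs_bucket_name": "my-staging-bucket",
    "gcs_bucket_path": "airbyte/staging",
    "keep_files_in_gcs-bucket": "Delete all tmp files from GCS",
    "credential": {
      "hmac_key_access_id": "GOOG1EXAMPLE",
      "hmac_key_secret": "base64secret"
    }
  }
}
EOF
```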
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this setting you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=GCS Bucket Path string Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted url. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
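The destination-kafka fields above mirror standard Apache Kafka producer settings. A hedged sketch of how a few of them could map onto `ProducerConfig` keys; the mapping, values, and helper are illustrative assumptions, not the connector's actual wiring:

```scala
import java.util.Properties
import org.apache.kafka.clients.producer.ProducerConfig

// Illustrative only: map a handful of the destination-kafka fields above onto
// the corresponding Kafka producer configuration keys.
object KafkaDestinationSettings {
  def producerProps(bootstrapServers: String, clientId: String): Properties = {
    val p = new Properties()
    p.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers) // "Bootstrap Servers"
    p.put(ProducerConfig.CLIENT_ID_CONFIG, clientId)                 // "Client ID"
    p.put(ProducerConfig.ACKS_CONFIG, "all")                         // "ACKs"
    p.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384")                 // "Batch Size"
    p.put(ProducerConfig.LINGER_MS_CONFIG, "0")                      // "Linger ms"
    p.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")          // "Enable Idempotence"
    p.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "none")            // "Compression Type"
    p
  }
}
```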
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
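The Kafka topic_pattern description above (and the MQTT one further down) mentions '{namespace}' and '{stream}' placeholders. A minimal sketch of how such a pattern might be resolved, assuming a naive sanitisation step for the "standard naming convention" that the description leaves unspecified:

```scala
// Minimal sketch: substitute the documented placeholders, then apply an assumed
// sanitisation rule standing in for the unspecified naming convention.
object TopicPattern {
  def resolve(pattern: String, namespace: String, stream: String): String =
    pattern
      .replace("{namespace}", namespace)
      .replace("{stream}", stream)
      .replaceAll("[^A-Za-z0-9._-]", "_")
}

// TopicPattern.resolve("{namespace}.{stream}", "public", "users") == "public.users"
```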
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member seperated by comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
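Several destinations above (MSSQL, MySQL, and Oracle here; ClickHouse later) describe jdbc_url_params as 'key=value' pairs joined by '&'. A small sketch of appending such params to a base JDBC URL; the MySQL-style URL in the usage comment is only an example, and the handling of an existing '?' is deliberately naive:

```scala
// Illustrative only: append '&'-separated key=value pairs to a JDBC URL.
object JdbcUrlParams {
  def withParams(baseUrl: String, jdbcUrlParams: String): String =
    if (jdbcUrlParams.isEmpty) baseUrl
    else if (baseUrl.contains("?")) s"$baseUrl&$jdbcUrlParams"
    else s"$baseUrl?$jdbcUrlParams"
}

// JdbcUrlParams.withParams("jdbc:mysql://localhost:3306/mydb", "key1=value1&key2=value2")
//   == "jdbc:mysql://localhost:3306/mydb?key1=value1&key2=value2"
```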
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure Blob Storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting allow one to access the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Default to 'true'. Switch it to 'false' for debugging purpose. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint url.(if you are working with AWS DynamoDB, just leave empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use a api key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to used when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full url of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the elasticsearch doc id. Does not support composite primary keys. 
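The three `authenticationMethod` options above (none, API key/secret, username/password) map onto standard Elasticsearch HTTP authorization headers. A sketch under that assumption, following the usual `ApiKey base64(id:secret)` and `Basic base64(user:password)` conventions rather than the connector's actual implementation:

```scala
import java.nio.charset.StandardCharsets
import java.util.Base64

// Sketch: the Authorization header implied by each authenticationMethod option.
sealed trait EsAuth
case object NoAuth extends EsAuth
case class ApiKeyAuth(apiKeyId: String, apiKeySecret: String) extends EsAuth
case class BasicAuth(username: String, password: String) extends EsAuth

object EsAuthHeader {
  private def b64(s: String): String =
    Base64.getEncoder.encodeToString(s.getBytes(StandardCharsets.UTF_8))

  def header(auth: EsAuth): Option[(String, String)] = auth match {
    case NoAuth => None
    case ApiKeyAuth(id, secret) =>
      Some("Authorization" -> ("ApiKey " + b64(id + ":" + secret)))
    case BasicAuth(user, pass) =>
      Some("Authorization" -> ("Basic " + b64(user + ":" + pass)))
  }
}
```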
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=GCS Bucket Path string Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted url. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
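The Google Sheets credentials block above (client_id, client_secret, refresh_token) follows Google's standard OAuth refresh flow: the refresh token is exchanged for a short-lived access token. A minimal sketch using `java.net.http`, assuming Google's public token endpoint and URL-safe input values; it is not the connector's code:

```scala
import java.net.URI
import java.net.http.{HttpClient, HttpRequest, HttpResponse}

// Sketch of the standard Google OAuth refresh-token exchange implied by the
// client_id / client_secret / refresh_token fields above. Values are assumed
// to be URL-safe; production code would URL-encode them.
object GoogleOAuthRefresh {
  def refreshAccessToken(clientId: String, clientSecret: String, refreshToken: String): String = {
    val form =
      s"grant_type=refresh_token&client_id=$clientId&client_secret=$clientSecret&refresh_token=$refreshToken"
    val request = HttpRequest.newBuilder()
      .uri(URI.create("https://oauth2.googleapis.com/token"))
      .header("Content-Type", "application/x-www-form-urlencoded")
      .POST(HttpRequest.BodyPublishers.ofString(form))
      .build()
    // The JSON response body contains the short-lived access_token.
    HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString()).body()
  }
}
```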
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
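Most of the destination-kafka fields above correspond one-to-one with standard kafka-clients producer settings. A sketch showing how a few of them (bootstrap_servers, acks, batch_size, linger_ms, client_id) would populate a producer's `Properties`; the wiring is illustrative, not the connector's actual mapping:

```scala
import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig}
import org.apache.kafka.common.serialization.StringSerializer

// Sketch: mapping a few spec fields onto standard kafka-clients producer configs.
object KafkaProducerFromSpec {
  def build(bootstrapServers: String, acks: String, batchSize: Int,
            lingerMs: Long, clientId: String): KafkaProducer[String, String] = {
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers) // host1:port1,host2:port2
    props.put(ProducerConfig.ACKS_CONFIG, acks)                          // e.g. "all"
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, Integer.valueOf(batchSize))
    props.put(ProducerConfig.LINGER_MS_CONFIG, java.lang.Long.valueOf(lingerMs))
    props.put(ProducerConfig.CLIENT_ID_CONFIG, clientId)
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
    new KafkaProducer[String, String](props)
  }
}
```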
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
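The `topic_pattern` description above says '{namespace}' and '{stream}' placeholders are substituted per record and the result is transformed to a standard naming convention. A sketch of that substitution; the exact normalization rule is not stated here, so the regex below is only an illustrative assumption:

```scala
// Sketch of the '{namespace}' / '{stream}' substitution described for topic_pattern.
object TopicPattern {
  def resolve(pattern: String, namespace: String, stream: String): String = {
    val raw = pattern
      .replace("{namespace}", namespace)
      .replace("{stream}", stream)
    raw.replaceAll("[^A-Za-z0-9._-]", "_") // assumed normalization, for illustration only
  }
}

// TopicPattern.resolve("airbyte.{namespace}.{stream}", "public", "users") == "airbyte.public.users"
```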
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member seperated by comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
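The three `instance_type` options above (standalone host/port, replica set server addresses plus replica set name, Atlas cluster URL) correspond to the standard MongoDB connection-string forms. A sketch assuming the documented `mongodb://` and `mongodb+srv://` syntax; it is not the connector's implementation:

```scala
// Sketch: standard MongoDB connection strings implied by the instance_type options.
sealed trait MongoInstance
case class Standalone(host: String, port: Int) extends MongoInstance
case class ReplicaSet(serverAddresses: String, replicaSet: String) extends MongoInstance // "host1:port1,host2:port2"
case class Atlas(clusterUrl: String) extends MongoInstance

object MongoConnectionString {
  def build(instance: MongoInstance, database: String): String = instance match {
    case Standalone(host, port)    => s"mongodb://$host:$port/$database"
    case ReplicaSet(addrs, rsName) => s"mongodb://$addrs/$database?replicaSet=$rsName"
    case Atlas(clusterUrl)         => s"mongodb+srv://$clusterUrl/$database"
  }
}
```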
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
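The `ssl_mode` options listed above and continued below (disable, allow, prefer, require, verify-ca, verify-full) mirror the `sslmode` values understood by the PostgreSQL JDBC driver. A sketch of how they might be passed as JDBC URL parameters; `sslmode` and `sslrootcert` are standard pgjdbc parameters, but the wiring is illustrative rather than the connector's own code:

```scala
// Sketch: mapping ssl_mode and the CA certificate path onto pgjdbc URL parameters.
object PostgresSslUrl {
  def build(host: String, port: Int, database: String,
            sslMode: String, caCertPath: Option[String]): String = {
    val base = s"jdbc:postgresql://$host:$port/$database?sslmode=$sslMode"
    caCertPath match {
      case Some(path) => s"$base&sslrootcert=$path" // needed for verify-ca / verify-full
      case None       => base
    }
  }
}

// PostgresSslUrl.build("db.example.com", 5432, "airbyte", "verify-full", Some("/certs/ca.pem"))
```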
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the 
named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=The IAM role that will be assumed to write data to S3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=The name of the Lakeformation database to use +datasources.section.destination-aws-datalake.region.description=AWS region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty when running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in the GCS bucket. With this option you can choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
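The destination-bigquery keys above mirror the connector's spec.json, where loading_method is a oneOf with a title per option. As a quick sanity check while editing these strings, a one-off jq filter like the sketch below can list those option titles straight from a checked-out spec; the spec path and the connectionSpecification layout are assumptions inferred from the key names, not something this file guarantees.

```
# Sketch: list the loading-method options a connector spec exposes.
# Assumes .connectionSpecification.properties.loading_method.oneOf[].title,
# as implied by the keys above; the spec.json location is illustrative.
SPEC=$(find $CONNECTORS/destination-bigquery -name spec.json)
jq -r '.connectionSpecification.properties.loading_method.oneOf[]?.title' "$SPEC"
```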
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in the GCS bucket. With this option you can choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys. 
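For orientation, the elasticsearch keys above describe a config object with an endpoint plus one of the authenticationMethod variants. A minimal stub using only field names that appear in those keys might look like the sketch below; the values are made up, and the exact discriminator the connector expects is not captured in this file.

```
# Sketch: a destination-elasticsearch config stub built from the field names
# above; values are illustrative and not validated against the real spec.
cat <<'EOF' | jq .
{
  "endpoint": "https://es.example.com:9200",
  "upsert": true,
  "authenticationMethod": {
    "apiKeyId": "my-key-id",
    "apiKeySecret": "my-key-secret"
  }
}
EOF
```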
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size, but for the dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token used to obtain a new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
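Several destinations in this file (clickhouse above, and the mssql/mysql/oracle/postgres entries below) take a jdbc_url_params value formatted as 'key=value' pairs joined by '&'. If that string is being generated from structured data rather than typed by hand, a small jq expression along these lines can build it; the property names in the input object are purely illustrative.

```
# Sketch: turn a JSON object of extra JDBC properties into the
# key1=value1&key2=value2 string the jdbc_url_params fields describe.
echo '{"connectTimeout":"30000","sslmode":"prefer"}' \
  | jq -r 'to_entries | map("\(.key)=\(.value)") | join("&")'
# -> connectTimeout=30000&sslmode=prefer
```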
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
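The destination-kafka entries above (bootstrap_servers, topic_pattern, acks, and so on) map directly onto standard Kafka producer settings. A minimal config stub using only field names that appear in those keys could look like the sketch below; the values are illustrative and not checked against the actual connector spec.

```
# Sketch: a destination-kafka config stub using field names from the keys
# above; values (servers, topic pattern, acks) are illustrative only.
cat <<'EOF' | jq .
{
  "bootstrap_servers": "host1:9092,host2:9092",
  "topic_pattern": "{namespace}.{stream}",
  "test_topic": "airbyte-test",
  "acks": "all",
  "batch_size": 16384,
  "client_id": "airbyte-producer"
}
EOF
```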
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member seperated by comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3. +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. This setting lets you choose whether those records are removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. This setting lets you choose whether those records are removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s). +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key ID. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key ID used to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key ID. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server. +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server. +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc ID. Does not support composite primary keys. 
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to log in to. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS key. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target Firestore database. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=GCS Bucket Path string Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted url. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
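To make the topic_pattern substitution described above concrete, here is a hypothetical illustration (the pattern, namespace, and stream names are invented; the final name is still subject to the connector's standard topic-name normalization):

```
topic_pattern:   airbyte.{namespace}.{stream}
namespace:       public
stream:          users
resulting topic: airbyte.public.users
```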
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member seperated by comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
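Since the replica-set server_addresses value above is a comma-separated list of `host`:`port` members, a consumer of these settings has to split it back apart. A minimal sketch, with illustrative names and an assumed (host, port) tuple shape not taken from this repo:

```
// Minimal sketch: split a replica-set server_addresses value such as
// "mongo-1:27017,mongo-2:27017" into (host, port) pairs. Names are illustrative.
def parseServerAddresses(raw: String): Seq[(String, Int)] =
  raw.split(",").toSeq.map(_.trim).filter(_.nonEmpty).map { member =>
    val Array(host, port) = member.split(":", 2)
    (host, port.toInt)
  }

// parseServerAddresses("mongo-1:27017, mongo-2:27017")
// => Seq(("mongo-1", 27017), ("mongo-2", 27017))
```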
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
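Several of the messaging destinations (Kafka, MQTT, and later Pulsar) share the same topic_pattern convention with '{namespace}' and '{stream}' placeholders. A minimal sketch of that substitution; the lowercase/underscore normalization step is only an assumption about the "standard naming convention" the descriptions mention:

```
// Minimal sketch: resolve a topic pattern like "airbyte.{namespace}.{stream}".
// The normalization rule below is an assumption, not taken from this repo.
def resolveTopic(pattern: String, namespace: String, stream: String): String =
  pattern
    .replace("{namespace}", namespace)
    .replace("{stream}", stream)
    .toLowerCase
    .replaceAll("[^a-z0-9._-]", "_") // assumed "standard naming convention"

// resolveTopic("airbyte.{namespace}.{stream}", "public", "Users")
// => "airbyte.public.users"
```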
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
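The jdbc_url_params fields above (MySQL, MSSQL, Oracle, and the other JDBC destinations) all expect 'key=value' pairs joined by '&'. A minimal sketch of how such a string is typically appended to a JDBC URL; the base URL and the '?' separator are illustrative, and some drivers (e.g. MSSQL) use ';'-separated properties instead:

```
// Minimal sketch: append the 'key1=value1&key2=value2' style jdbc_url_params
// to a base JDBC URL. Separator conventions vary by driver; this is only an
// illustration.
def withJdbcParams(baseUrl: String, jdbcUrlParams: String): String =
  if (jdbcUrlParams.trim.isEmpty) baseUrl
  else s"$baseUrl?$jdbcUrlParams"

// withJdbcParams("jdbc:mysql://db-host:3306/mydb", "key1=value1&key2=value2")
// => "jdbc:mysql://db-host:3306/mydb?key1=value1&key2=value2"
```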
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer. +datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages. +datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions. +datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name. +datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar. +datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms). +datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces. +datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters. +datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk. +datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-rabbitmq.exchange.description=The exchange name. +datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name. +datasources.section.destination-rabbitmq.password.description=The password to connect. +datasources.section.destination-rabbitmq.port.description=The RabbitMQ port. +datasources.section.destination-rabbitmq.routing_key.description=The routing key. +datasources.section.destination-rabbitmq.ssl.description=SSL enabled. +datasources.section.destination-rabbitmq.username.description=The username to connect. +datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name. +datasources.section.destination-redis.cache_type.title=Cache type +datasources.section.destination-redis.host.title=Host +datasources.section.destination-redis.password.title=Password +datasources.section.destination-redis.port.title=Port +datasources.section.destination-redis.username.title=Username +datasources.section.destination-redis.cache_type.description=Redis cache type to store data in. +datasources.section.destination-redis.host.description=Redis host to connect to. +datasources.section.destination-redis.password.description=Password associated with Redis. +datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis. +datasources.section.destination-redshift.database.title=Database +datasources.section.destination-redshift.host.title=Host +datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-redshift.password.title=Password +datasources.section.destination-redshift.port.title=Port +datasources.section.destination-redshift.schema.title=Default Schema +datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key +datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging +datasources.section.destination-redshift.uploading_method.title=Uploading Method +datasources.section.destination-redshift.username.title=Username +datasources.section.destination-redshift.database.description=Name of the database. +datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com) +datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-redshift.password.description=Password associated with the username. +datasources.section.destination-redshift.port.description=Port of the database. +datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-redshift.uploading_method.description=The method how the data will be uploaded to the database. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key. 
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-rockset.api_key.title=Api Key +datasources.section.destination-rockset.api_server.title=Api Server +datasources.section.destination-rockset.workspace.title=Workspace +datasources.section.destination-rockset.api_key.description=Rockset api key +datasources.section.destination-rockset.api_server.description=Rockset api URL +datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created + written to. 
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
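All of these keys follow the pattern datasources.section.<connector>.<field path>.title|description, where the field path mirrors connectionSpecification.properties from each connector's spec.json (including oneOf indices). A minimal sketch of looking them up from the JVM resources; the resource name airbyte/datasources.properties is hypothetical:

```
import java.util.Properties

// Minimal sketch: load the bundled strings and look up one connector field's
// title. The resource path below is an assumption, not taken from this repo.
def loadDatasourceStrings(): Properties = {
  val props = new Properties()
  val in = getClass.getResourceAsStream("/airbyte/datasources.properties")
  require(in != null, "resource not found")
  try props.load(in) finally in.close()
  props
}

// loadDatasourceStrings()
//   .getProperty("datasources.section.destination-bigquery.dataset_id.title")
// => "Default Dataset ID"
```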
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID used when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the elasticsearch doc id. Does not support composite primary keys.
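The three Elasticsearch authenticationMethod options above map naturally onto an HTTP Authorization header. A minimal sketch using the standard Elasticsearch conventions (ApiKey and Basic schemes); the case-class names are illustrative and not taken from this repo:

```
import java.util.Base64

// Minimal sketch: turn the three authenticationMethod options into an
// Authorization header value. Type names here are illustrative only.
sealed trait EsAuth
case object NoAuth extends EsAuth
case class ApiKey(id: String, secret: String) extends EsAuth
case class BasicAuth(username: String, password: String) extends EsAuth

def authHeader(auth: EsAuth): Option[(String, String)] = auth match {
  case NoAuth => None
  case ApiKey(id, secret) =>
    val token = Base64.getEncoder.encodeToString(s"$id:$secret".getBytes("UTF-8"))
    Some("Authorization" -> s"ApiKey $token")
  case BasicAuth(user, pass) =>
    val token = Base64.getEncoder.encodeToString(s"$user:$pass".getBytes("UTF-8"))
    Some("Authorization" -> s"Basic $token")
}
```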
+datasources.section.destination-firebolt.account.title=Account
+datasources.section.destination-firebolt.database.title=Database
+datasources.section.destination-firebolt.engine.title=Engine
+datasources.section.destination-firebolt.host.title=Host
+datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name
+datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3
+datasources.section.destination-firebolt.loading_method.title=Loading Method
+datasources.section.destination-firebolt.password.title=Password
+datasources.section.destination-firebolt.username.title=Username
+datasources.section.destination-firebolt.account.description=Firebolt account to log in to.
+datasources.section.destination-firebolt.database.description=The database to connect to.
+datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to.
+datasources.section.destination-firebolt.host.description=The host name of your Firebolt database.
+datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3.
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket.
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket.
+datasources.section.destination-firebolt.password.description=Firebolt password.
+datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in.
+datasources.section.destination-firestore.credentials_json.title=Credentials JSON
+datasources.section.destination-firestore.project_id.title=Project ID
+datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target Firestore database.
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret
+datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key
+datasources.section.destination-gcs.credential.title=Authentication
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional)
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional)
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional)
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional)
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec
+datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional)
+datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-gcs.format.title=Output Format
+datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional)
+datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here.
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here.
+datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format.
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression.
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is a waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details.
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for the dictionary. Default: 1024 KB.
+datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here.
+datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into.
+datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here.
+datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID
+datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret
+datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token
+datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth)
+datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link
+datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs
+datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application.
+datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application.
+datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining a new access token.
+datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details.
+datasources.section.destination-jdbc.jdbc_url.title=JDBC URL
+datasources.section.destination-jdbc.password.title=Password
+datasources.section.destination-jdbc.schema.title=Default Schema
+datasources.section.destination-jdbc.username.title=Username
+datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here.
+datasources.section.destination-jdbc.password.description=The password associated with this username.
+datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public".
+datasources.section.destination-jdbc.username.description=The username which is used to access the database.
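+# Illustrative example (hypothetical, not part of the generated spec): a JDBC-formatted URL
+# follows the shape jdbc:<driver>://<host>:<port>/<database>, and additional driver options
+# can be appended as 'key=value' pairs joined by '&', e.g. key1=value1&key2=value2.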
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses, the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab in the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow the connector to guess the keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab in the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for the current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before they are batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
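+# Illustrative example (hypothetical, not part of the generated spec): a topic_pattern such as
+# airbyte.{namespace}.{stream} would publish a record from namespace 'public' and stream 'users'
+# to a topic derived from airbyte.public.users, after the standard name normalization noted above.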
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and are spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method by which the data will be uploaded to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account ID.
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the S3 bucket.
+datasources.section.destination-aws-datalake.bucket_prefix.description=Target S3 bucket prefix.
+datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If it does not exist, it will be created automatically. May be left empty, in which case a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether those records should be removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
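+# Illustrative note (comment only, not generated from any connector spec): each spec field contributes a
+# pair of keys under its JSON path, <prefix>.<connector>.<field path>.title and <...>.description. For a
+# hypothetical connector "destination-example" with a top-level "api_key" field this would look like:
+# datasources.section.destination-example.api_key.title=API Key
+# datasources.section.destination-example.api_key.description=The API key used to authenticate requests.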
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method *
+datasources.section.destination-bigquery-denormalized.project_id.title=Project ID
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether those records should be removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-cassandra.address.title=Address
+datasources.section.destination-cassandra.datacenter.title=Datacenter
+datasources.section.destination-cassandra.keyspace.title=Keyspace
+datasources.section.destination-cassandra.password.title=Password
+datasources.section.destination-cassandra.port.title=Port
+datasources.section.destination-cassandra.replication.title=Replication factor
+datasources.section.destination-cassandra.username.title=Username
+datasources.section.destination-cassandra.address.description=Address to connect to.
+datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster.
+datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in.
+datasources.section.destination-cassandra.password.description=Password associated with Cassandra.
+datasources.section.destination-cassandra.port.description=Port of Cassandra.
+datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id.
+datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path.
+datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests.
+datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port.
+datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname.
+datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes.
+datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id
+datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint
+datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region
+datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix
+datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key
+datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB.
+datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty).
+datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB.
+datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables.
+datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password
+datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method
+datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint
+datasources.section.destination-elasticsearch.upsert.title=Upsert Records
+datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server
+datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server
+datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys.
+datasources.section.destination-firebolt.account.title=Account
+datasources.section.destination-firebolt.database.title=Database
+datasources.section.destination-firebolt.engine.title=Engine
+datasources.section.destination-firebolt.host.title=Host
+datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name
+datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3
+datasources.section.destination-firebolt.loading_method.title=Loading Method
+datasources.section.destination-firebolt.password.title=Password
+datasources.section.destination-firebolt.username.title=Username
+datasources.section.destination-firebolt.account.description=Firebolt account to log in to.
+datasources.section.destination-firebolt.database.description=The database to connect to.
+datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to.
+datasources.section.destination-firebolt.host.description=The host name of your Firebolt database.
+datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3.
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket.
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket.
+datasources.section.destination-firebolt.password.description=Firebolt password.
+datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in.
+datasources.section.destination-firestore.credentials_json.title=Credentials JSON
+datasources.section.destination-firestore.project_id.title=Project ID
+datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target Firestore database.
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here.
+datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into.
+datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here.
+datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID
+datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret
+datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token
+datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth)
+datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link
+datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs
+datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application.
+datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application.
+datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token used to obtain a new access token.
+datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details.
+datasources.section.destination-jdbc.jdbc_url.title=JDBC URL
+datasources.section.destination-jdbc.password.title=Password
+datasources.section.destination-jdbc.schema.title=Default Schema
+datasources.section.destination-jdbc.username.title=Username
+datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here.
+datasources.section.destination-jdbc.password.description=The password associated with this username.
+datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public".
+datasources.section.destination-jdbc.username.description=The username which is used to access the database.
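+# Illustrative example (assumed values, comment only): the JDBC formatted URL referenced by
+# destination-jdbc.jdbc_url typically looks like jdbc:postgresql://db.example.com:5432/airbyte?ssl=true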
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member seperated by comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
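+# Illustrative example (assumed values, comment only): with a topic pattern such as airbyte_{namespace}_{stream},
+# a record from namespace "public" and stream "users" would be published to the topic airbyte_public_users
+# (after the standard naming normalization mentioned in the descriptions above).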
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If it does not exist, it will be created automatically. May be left empty; in that case a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
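The keys above follow the layout datasources.section.<connector>.<property path>.title|description, with oneOf.<N> numbering the alternative configurations of a property. A minimal sketch of pulling one property's options out of the generated file (the file path is an assumption for illustration):

```
# List the loading-method option titles of destination-bigquery.
# The properties file path below is hypothetical.
grep '^datasources\.section\.destination-bigquery\.loading_method\.oneOf\.[0-9]\.title=' \
  path/to/datasources.properties | cut -d= -f2-
# -> Standard Inserts
# -> GCS Staging
```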
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method *
+datasources.section.destination-bigquery-denormalized.project_id.title=Project ID
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-cassandra.address.title=Address
+datasources.section.destination-cassandra.datacenter.title=Datacenter
+datasources.section.destination-cassandra.keyspace.title=Keyspace
+datasources.section.destination-cassandra.password.title=Password
+datasources.section.destination-cassandra.port.title=Port
+datasources.section.destination-cassandra.replication.title=Replication factor
+datasources.section.destination-cassandra.username.title=Username
+datasources.section.destination-cassandra.address.description=Address to connect to.
+datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster.
+datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in.
+datasources.section.destination-cassandra.password.description=Password associated with Cassandra.
+datasources.section.destination-cassandra.port.description=Port of Cassandra.
+datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s).
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key ID.
+datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path.
+datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests.
+datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port.
+datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname.
+datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes.
+datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id
+datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint
+datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region
+datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix
+datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key
+datasources.section.destination-dynamodb.access_key_id.description=The access key ID used to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB.
+datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty).
+datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB.
+datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables.
+datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key ID.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password
+datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method
+datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint
+datasources.section.destination-elasticsearch.upsert.title=Upsert Records
+datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The key ID used when accessing an enterprise Elasticsearch instance.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server
+datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server
+datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc ID. Does not support composite primary keys.
+datasources.section.destination-firebolt.account.title=Account
+datasources.section.destination-firebolt.database.title=Database
+datasources.section.destination-firebolt.engine.title=Engine
+datasources.section.destination-firebolt.host.title=Host
+datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name
+datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3
+datasources.section.destination-firebolt.loading_method.title=Loading Method
+datasources.section.destination-firebolt.password.title=Password
+datasources.section.destination-firebolt.username.title=Username
+datasources.section.destination-firebolt.account.description=Firebolt account to log in to.
+datasources.section.destination-firebolt.database.description=The database to connect to.
+datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to.
+datasources.section.destination-firebolt.host.description=The host name of your Firebolt database.
+datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt.
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3.
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS key.
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket.
+datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket.
+datasources.section.destination-firebolt.password.description=Firebolt password.
+datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in.
+datasources.section.destination-firestore.credentials_json.title=Credentials JSON
+datasources.section.destination-firestore.project_id.title=Project ID
+datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target Firestore database.
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for the dictionary. Default: 1024 KB.
+datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here.
+datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into.
+datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here.
+datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID
+datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret
+datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token
+datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth)
+datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link
+datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs
+datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application.
+datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application.
+datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining a new access token.
+datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details.
+datasources.section.destination-jdbc.jdbc_url.title=JDBC URL
+datasources.section.destination-jdbc.password.title=Password
+datasources.section.destination-jdbc.schema.title=Default Schema
+datasources.section.destination-jdbc.username.title=Username
+datasources.section.destination-jdbc.jdbc_url.description=JDBC-formatted URL. See the standard here.
+datasources.section.destination-jdbc.password.description=The password associated with this username.
+datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public".
+datasources.section.destination-jdbc.username.description=The username which is used to access the database.
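For the JDBC URL and the jdbc_url_params format mentioned above, a hypothetical illustration (all values are placeholders, not defaults of any connector):

```
# A JDBC-formatted URL with extra parameters appended as key=value pairs joined by '&'.
JDBC_URL="jdbc:postgresql://db.example.com:5432/airbyte"
JDBC_URL_PARAMS="ssl=true&connectTimeout=10"
echo "$JDBC_URL?$JDBC_URL_PARAMS"
# -> jdbc:postgresql://db.example.com:5432/airbyte?ssl=true&connectTimeout=10
```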
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see the documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
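For the Replica Set instance type above, server_addresses is a comma-separated list of host:port members; a hypothetical value (hostnames are placeholders):

```
# Placeholder replica set members in host:port form, separated by commas.
SERVER_ADDRESSES="mongo1.example.com:27017,mongo2.example.com:27017,mongo3.example.com:27017"
```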
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the Microsoft native endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
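+# Illustrative note, not part of the generated spec: the Microsoft native endpoint domain referenced above is typically blob.core.windows.net, i.e. https://<account name>.blob.core.windows.net.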
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
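+# Illustrative example only, not generated from a spec: with GCS Staging, a gcs_bucket_name of "airbyte-staging" and a gcs_bucket_path of "data_sync" would stage files under gs://airbyte-staging/data_sync/ before they are loaded into BigQuery.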
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key ID. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key ID used to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB instance. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key ID. 
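+# Illustrative example only, not generated from a spec: dynamodb_endpoint can point at a local emulator such as http://localhost:8000 (DynamoDB Local); leave it empty to use the AWS-managed service.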
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc ID. Does not support composite primary keys. 
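+# Illustrative example only, not generated from a spec: the Elasticsearch endpoint is a full URL including scheme and port, e.g. https://es.example.com:9200.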
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to log in to. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target Firestore database. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for the dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token used to obtain a new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
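+# Illustrative example only, not generated from a spec: a destination-jdbc jdbc_url follows the standard JDBC form, e.g. jdbc:postgresql://localhost:5432/mydb.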
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDB Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDB Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member, separated by commas. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
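+# Illustrative example only, not generated from a spec: server_addresses for a three-member replica set could be "mongo1:27017,mongo2:27017,mongo3:27017".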
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
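+# Illustrative example only, not generated from a spec: a topic_pattern of "{namespace}.{stream}" would route records from stream "users" in namespace "public" to a topic derived from "public.users".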
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by the server before the send timeout (in ms) expires, an error occurs.
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and are spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis. +datasources.section.destination-redshift.database.title=Database +datasources.section.destination-redshift.host.title=Host +datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-redshift.password.title=Password +datasources.section.destination-redshift.port.title=Port +datasources.section.destination-redshift.schema.title=Default Schema +datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key +datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging +datasources.section.destination-redshift.uploading_method.title=Uploading Method +datasources.section.destination-redshift.username.title=Username +datasources.section.destination-redshift.database.description=Name of the database. +datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com) +datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-redshift.password.description=Password associated with the username. +datasources.section.destination-redshift.port.description=Port of the database. +datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-redshift.uploading_method.description=The method how the data will be uploaded to the database. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key. 
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-rockset.api_key.title=Api Key +datasources.section.destination-rockset.api_server.title=Api Server +datasources.section.destination-rockset.workspace.title=Workspace +datasources.section.destination-rockset.api_key.description=Rockset api key +datasources.section.destination-rockset.api_server.description=Rockset api URL +datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created + written to. 
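Several of the database destinations above (MSSQL, MySQL, Oracle, Postgres, Redshift) describe `jdbc_url_params` as '&'-separated key=value pairs appended to the JDBC URL. A small jq sketch of what that composition could look like is below; the Postgres-style URL prefix and the example values are assumptions for illustration, not taken from the connectors.

```
# illustration only: appending jdbc_url_params to a base JDBC URL
{host: "db.example.com", port: 5432, database: "mydb",
 jdbc_url_params: "ssl=true&loginTimeout=10"}
| "jdbc:postgresql://\(.host):\(.port)/\(.database)?\(.jdbc_url_params)"
# => "jdbc:postgresql://db.example.com:5432/mydb?ssl=true&loginTimeout=10"
```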
+datasources.section.destination-s3.access_key_id.title=S3 Key ID * +datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec * +datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage 
+datasources.section.destination-s3.format.title=Output Format * +datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional) +datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional) +datasources.section.destination-s3.secret_access_key.title=S3 Access Key * +datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here. +datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-s3.format.description=Format of the data output. See here for more details +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. 
This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here. +datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here +datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes. +datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here +datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here +datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here +datasources.section.destination-scylla.address.title=Address +datasources.section.destination-scylla.keyspace.title=Keyspace +datasources.section.destination-scylla.password.title=Password +datasources.section.destination-scylla.port.title=Port +datasources.section.destination-scylla.replication.title=Replication factor +datasources.section.destination-scylla.username.title=Username +datasources.section.destination-scylla.address.description=Address to connect to. +datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in. +datasources.section.destination-scylla.password.description=Password associated with Scylla. +datasources.section.destination-scylla.port.description=Port of Scylla. +datasources.section.destination-scylla.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-scylla.username.description=Username to use to access Scylla. +datasources.section.destination-sftp-json.destination_path.title=Destination path +datasources.section.destination-sftp-json.host.title=Host +datasources.section.destination-sftp-json.password.title=Password +datasources.section.destination-sftp-json.port.title=Port +datasources.section.destination-sftp-json.username.title=User +datasources.section.destination-sftp-json.destination_path.description=Path to the directory where json files will be written. +datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server. +datasources.section.destination-sftp-json.password.description=Password associated with the username. +datasources.section.destination-sftp-json.port.description=Port of the SFTP server. +datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server. 
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional) +datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password +datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password +datasources.section.destination-snowflake.credentials.title=Authorization Method +datasources.section.destination-snowflake.database.title=Database +datasources.section.destination-snowflake.host.title=Host +datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging
+datasources.section.destination-snowflake.loading_method.title=Data Staging Method
+datasources.section.destination-snowflake.role.title=Role
+datasources.section.destination-snowflake.schema.title=Default Schema
+datasources.section.destination-snowflake.username.title=Username
+datasources.section.destination-snowflake.warehouse.title=Warehouse
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username.
+datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into
+datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format ...snowflakecomputing.com)
+datasources.section.destination-snowflake.jdbc_url_params.description=Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &).
Example: key1=value1&key2=value2&key3=value3 +datasources.section.destination-snowflake.loading_method.description=Select a data staging method +datasources.section.destination-snowflake.loading_method.oneOf.0.description=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.description=Enter your AWS access key ID. Airbyte requires Read and Write permissions on your S3 bucket +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.description=Choose a data encryption method for the staging data +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.description=Toggle to delete staging files from the S3 bucket after a successful sync +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.description=Enter your S3 bucket name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.description=Enter the region where your S3 bucket resides +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.description=Enter your AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.3.description=Recommended for large production workloads for better speed and scalability. 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.description=Enter the Cloud Storage bucket name +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.description=Enter your Google Cloud service account key in the JSON format with read/write access to your Cloud Storage staging bucket +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.description=Enter the Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.4.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.description=Enter your Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.description=Enter your Azure Blob Storage container name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.description=Enter the Azure Blob Storage endpoint domain name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.description=Enter the Shared access signature (SAS) token to grant Snowflake limited access to objects in your Azure Blob Storage account +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.role.description=Enter the role that you want to use to access Snowflake +datasources.section.destination-snowflake.schema.description=Enter the name of the default schema +datasources.section.destination-snowflake.username.description=Enter the name of the user you want to use to access the database +datasources.section.destination-snowflake.warehouse.description=Enter the name of the warehouse that you want to sync data into +datasources.section.destination-sqlite.destination_path.description=Path to the sqlite.db file. The file will be placed inside that local mount. For more information check out our docs +datasources.section.destination-tidb.database.title=Database +datasources.section.destination-tidb.host.title=Host +datasources.section.destination-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-tidb.password.title=Password +datasources.section.destination-tidb.port.title=Port +datasources.section.destination-tidb.ssl.title=SSL Connection +datasources.section.destination-tidb.username.title=User +datasources.section.destination-tidb.database.description=Name of the database. +datasources.section.destination-tidb.host.description=Hostname of the database. +datasources.section.destination-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-tidb.password.description=Password associated with the username. +datasources.section.destination-tidb.port.description=Port of the database. 
+datasources.section.destination-tidb.ssl.description=Encrypt data using SSL. +datasources.section.destination-tidb.username.description=Username to use to access the database. +datasources.section.source-airtable.api_key.title=API Key +datasources.section.source-airtable.base_id.title=Base ID +datasources.section.source-airtable.tables.title=Tables +datasources.section.source-airtable.api_key.description=The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key. +datasources.section.source-airtable.base_id.description=The Base ID to integrate the data from. You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs. +datasources.section.source-airtable.tables.description=The list of Tables to integrate. +datasources.section.source-amazon-ads.auth_type.title=Auth Type +datasources.section.source-amazon-ads.client_id.title=Client ID +datasources.section.source-amazon-ads.client_secret.title=Client Secret +datasources.section.source-amazon-ads.profiles.title=Profile IDs (Optional) +datasources.section.source-amazon-ads.refresh_token.title=Refresh Token +datasources.section.source-amazon-ads.region.title=Region * +datasources.section.source-amazon-ads.report_generation_max_retries.title=Report Generation Maximum Retries * +datasources.section.source-amazon-ads.report_wait_timeout.title=Report Wait Timeout * +datasources.section.source-amazon-ads.start_date.title=Start Date (Optional) +datasources.section.source-amazon-ads.client_id.description=The client ID of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.client_secret.description=The client secret of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.profiles.description=Profile IDs you want to fetch data for. See docs for more details. +datasources.section.source-amazon-ads.refresh_token.description=Amazon Ads refresh token. See the docs for more information on how to obtain this token. +datasources.section.source-amazon-ads.region.description=Region to pull data from (EU/NA/FE). See docs for more details. +datasources.section.source-amazon-ads.report_generation_max_retries.description=Maximum retries Airbyte will attempt for fetching report data. Default is 5. +datasources.section.source-amazon-ads.report_wait_timeout.description=Timeout duration in minutes for Reports. Default is 30 minutes. +datasources.section.source-amazon-ads.start_date.description=The Start date for collecting reports, should not be more than 60 days in the past. 
In YYYY-MM-DD format.
+datasources.section.source-amazon-seller-partner.app_id.title=App ID *
+datasources.section.source-amazon-seller-partner.auth_type.title=Auth Type
+datasources.section.source-amazon-seller-partner.aws_access_key.title=AWS Access Key
+datasources.section.source-amazon-seller-partner.aws_environment.title=AWS Environment
+datasources.section.source-amazon-seller-partner.aws_secret_key.title=AWS Secret Access Key
+datasources.section.source-amazon-seller-partner.lwa_app_id.title=LWA Client ID
+datasources.section.source-amazon-seller-partner.lwa_client_secret.title=LWA Client Secret
+datasources.section.source-amazon-seller-partner.max_wait_seconds.title=Max wait time for reports (in seconds)
+datasources.section.source-amazon-seller-partner.period_in_days.title=Period In Days
+datasources.section.source-amazon-seller-partner.refresh_token.title=Refresh Token
+datasources.section.source-amazon-seller-partner.region.title=AWS Region
+datasources.section.source-amazon-seller-partner.replication_end_date.title=End Date
+datasources.section.source-amazon-seller-partner.replication_start_date.title=Start Date
+datasources.section.source-amazon-seller-partner.report_options.title=Report Options
+datasources.section.source-amazon-seller-partner.role_arn.title=Role ARN
+datasources.section.source-amazon-seller-partner.app_id.description=Your Amazon App ID
+datasources.section.source-amazon-seller-partner.aws_access_key.description=Specifies the AWS access key used as part of the credentials to authenticate the user.
+datasources.section.source-amazon-seller-partner.aws_environment.description=An enumeration.
+datasources.section.source-amazon-seller-partner.aws_secret_key.description=Specifies the AWS secret key used as part of the credentials to authenticate the user.
+datasources.section.source-amazon-seller-partner.lwa_app_id.description=Your Login with Amazon Client ID.
+datasources.section.source-amazon-seller-partner.lwa_client_secret.description=Your Login with Amazon Client Secret.
+datasources.section.source-amazon-seller-partner.max_wait_seconds.description=Sometimes a report can take up to 30 minutes to generate. This sets the limit for how long to wait for a successful report.
+datasources.section.source-amazon-seller-partner.period_in_days.description=Used for stream slicing during the initial full_refresh sync when no updated state is present for reports that support sliced incremental sync.
+datasources.section.source-amazon-seller-partner.refresh_token.description=The Refresh Token obtained via OAuth flow authorization.
+datasources.section.source-amazon-seller-partner.region.description=An enumeration.
+datasources.section.source-amazon-seller-partner.replication_end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated.
+datasources.section.source-amazon-seller-partner.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-amazon-seller-partner.report_options.description=Additional information passed to reports. This varies by report type. Must be a valid JSON string.
+datasources.section.source-amazon-seller-partner.role_arn.description=Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations requested using this profile. (Needs permission to 'Assume Role' STS).
+datasources.section.source-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.source-amazon-sqs.attributes_to_return.title=Message Attributes To Return
+datasources.section.source-amazon-sqs.delete_messages.title=Delete Messages After Read
+datasources.section.source-amazon-sqs.max_batch_size.title=Max Batch Size
+datasources.section.source-amazon-sqs.max_wait_time.title=Max Wait Time
+datasources.section.source-amazon-sqs.queue_url.title=Queue URL
+datasources.section.source-amazon-sqs.region.title=AWS Region
+datasources.section.source-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.source-amazon-sqs.visibility_timeout.title=Message Visibility Timeout
+datasources.section.source-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for pulling messages
+datasources.section.source-amazon-sqs.attributes_to_return.description=Comma-separated list of Message Attribute names to return
+datasources.section.source-amazon-sqs.delete_messages.description=If Enabled, messages will be deleted from the SQS Queue after being read. If Disabled, messages are left in the queue and can be read more than once. WARNING: Enabling this option can result in data loss in cases of failure; use with caution, see documentation for more detail.
+datasources.section.source-amazon-sqs.max_batch_size.description=Max number of messages to get in one batch (10 max)
+datasources.section.source-amazon-sqs.max_wait_time.description=Max amount of time in seconds to wait for messages in a single poll (20 max)
+datasources.section.source-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.source-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.source-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for pulling messages
+datasources.section.source-amazon-sqs.visibility_timeout.description=Modify the Visibility Timeout of the individual message from the Queue's default (seconds).
+datasources.section.source-amplitude.api_key.title=API Key
+datasources.section.source-amplitude.secret_key.title=Secret Key
+datasources.section.source-amplitude.start_date.title=Replication Start Date
+datasources.section.source-amplitude.api_key.description=Amplitude API Key. See the setup guide for more information on how to obtain this key.
+datasources.section.source-amplitude.secret_key.description=Amplitude Secret Key. See the setup guide for more information on how to obtain this key.
+datasources.section.source-amplitude.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-apify-dataset.clean.title=Clean
+datasources.section.source-apify-dataset.datasetId.title=Dataset ID
+datasources.section.source-apify-dataset.clean.description=If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false.
+datasources.section.source-apify-dataset.datasetId.description=ID of the dataset you would like to load to Airbyte.
+datasources.section.source-appsflyer.api_token.description=Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard.
+datasources.section.source-appsflyer.app_id.description=App identifier as found in AppsFlyer.
+datasources.section.source-appsflyer.start_date.description=The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days.
+datasources.section.source-appsflyer.timezone.description=Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console.
+datasources.section.source-appstore-singer.issuer_id.title=Issuer ID
+datasources.section.source-appstore-singer.key_id.title=Key ID
+datasources.section.source-appstore-singer.private_key.title=Private Key
+datasources.section.source-appstore-singer.start_date.title=Start Date
+datasources.section.source-appstore-singer.vendor.title=Vendor ID
+datasources.section.source-appstore-singer.issuer_id.description=Appstore Issuer ID. See the docs for more information on how to obtain this ID.
+datasources.section.source-appstore-singer.key_id.description=Appstore Key ID. See the docs for more information on how to obtain this key.
+datasources.section.source-appstore-singer.private_key.description=Appstore Private Key. See the docs for more information on how to obtain this key.
+datasources.section.source-appstore-singer.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-appstore-singer.vendor.description=Appstore Vendor ID. See the docs for more information on how to obtain this ID.
+datasources.section.source-asana.credentials.oneOf.0.properties.option_title.title=Credentials title
+datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.title=Personal Access Token
+datasources.section.source-asana.credentials.oneOf.0.title=Authenticate with Personal Access Token
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title=
+datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title=
+datasources.section.source-asana.credentials.oneOf.1.properties.option_title.title=Credentials title
+datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title=
+datasources.section.source-asana.credentials.oneOf.1.title=Authenticate via Asana (OAuth)
+datasources.section.source-asana.credentials.title=Authentication mechanism
+datasources.section.source-asana.credentials.description=Choose how to authenticate to Asana
+datasources.section.source-asana.credentials.oneOf.0.properties.option_title.description=PAT Credentials
+datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.description=Asana Personal Access Token (generate yours here).
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.description=OAuth Credentials +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description= +datasources.section.source-aws-cloudtrail.aws_key_id.title=Key ID +datasources.section.source-aws-cloudtrail.aws_region_name.title=Region Name +datasources.section.source-aws-cloudtrail.aws_secret_key.title=Secret Key +datasources.section.source-aws-cloudtrail.start_date.title=Start Date +datasources.section.source-aws-cloudtrail.aws_key_id.description=AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.aws_region_name.description=The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name. +datasources.section.source-aws-cloudtrail.aws_secret_key.description=AWS CloudTrail Secret Key. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.start_date.description=The date from which you would like to replicate data. Data in AWS CloudTrail is available for the last 90 days only. Format: YYYY-MM-DD. +datasources.section.source-azure-table.storage_access_key.title=Access Key +datasources.section.source-azure-table.storage_account_name.title=Account Name +datasources.section.source-azure-table.storage_endpoint_suffix.title=Endpoint Suffix +datasources.section.source-azure-table.storage_access_key.description=Azure Table Storage Access Key. See the docs for more information on how to obtain this key. +datasources.section.source-azure-table.storage_account_name.description=The name of your storage account. +datasources.section.source-azure-table.storage_endpoint_suffix.description=Azure Table Storage service account URL suffix. See the docs for more information on how to obtain the endpoint suffix. +datasources.section.source-bamboo-hr.api_key.description=API key of bamboo hr +datasources.section.source-bamboo-hr.custom_reports_fields.description=Comma-separated list of fields to include in custom reports. +datasources.section.source-bamboo-hr.custom_reports_include_default_fields.description=If true, the custom reports endpoint will include the default fields defined here: https://documentation.bamboohr.com/docs/list-of-field-names. 
+datasources.section.source-bamboo-hr.subdomain.description=Sub Domain of bamboo hr +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. This option lets you choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
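To make the `loading_method` choice above concrete, here is a hypothetical destination-bigquery configuration using only field names that appear in the keys above; the `method` discriminator value and every other value are assumptions for illustration, not taken from this patch:

```
# Hypothetical destination-bigquery config (all values are placeholders;
# the "method" discriminator value is an assumption).
cat > /tmp/destination_bigquery_config.json <<'EOF'
{
  "project_id": "my-gcp-project",
  "dataset_id": "airbyte_raw",
  "dataset_location": "US",
  "loading_method": { "method": "Standard" },
  "transformation_priority": "interactive"
}
EOF
```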
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
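For the destination-clickhouse fields above, a hypothetical configuration might look like the sketch below; every value is a placeholder, and the `jdbc_url_params` value simply illustrates the documented 'key=value' pairs joined by '&':

```
# Hypothetical destination-clickhouse config (placeholder values only).
cat > /tmp/destination_clickhouse_config.json <<'EOF'
{
  "host": "clickhouse.example.com",
  "port": 8123,
  "tcp-port": 9000,
  "database": "airbyte",
  "username": "airbyte_user",
  "password": "change-me",
  "ssl": false,
  "jdbc_url_params": "socket_timeout=300000&connect_timeout=10000"
}
EOF
```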
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key ID. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key ID to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key ID. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc ID. Does not support composite primary keys. 
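The "API Key/Secret" option corresponds to Elasticsearch's standard API-key scheme, where the key ID and secret are joined with a colon and base64-encoded into an `Authorization: ApiKey ...` header; a sketch with placeholder endpoint and credentials:

```
# How an Elasticsearch API key ID/secret pair is normally presented:
# Authorization: ApiKey base64(<id>:<secret>). All values are placeholders.
ENDPOINT="https://es.example.com:9200"
API_KEY_ID="my-key-id"
API_KEY_SECRET="my-key-secret"

AUTH=$(printf '%s:%s' "$API_KEY_ID" "$API_KEY_SECRET" | base64)
curl -s -H "Authorization: ApiKey $AUTH" "$ENDPOINT/_cluster/health"
```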
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to log in to. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target Firestore database. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=GCS Bucket Path string Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted url. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
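The destination-jdbc keys above expect a standard `jdbc:<subprotocol>://<host>:<port>/<database>` URL; a hypothetical configuration (all values are placeholders) could look like:

```
# Hypothetical destination-jdbc config (placeholder values only).
cat > /tmp/destination_jdbc_config.json <<'EOF'
{
  "jdbc_url": "jdbc:postgresql://db.example.com:5432/analytics",
  "username": "airbyte_user",
  "password": "change-me",
  "schema": "public"
}
EOF
```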
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member seperated by comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
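The Replica Set `server_addresses` field above expects one `host:port` entry per member, comma-separated; a sketch with placeholder hosts:

```
# Example of the comma-separated host:port list for a replica set;
# hosts and the replica set name are placeholders.
SERVER_ADDRESSES="mongo-1.example.com:27017,mongo-2.example.com:27017,mongo-3.example.com:27017"
REPLICA_SET="rs0"
echo "members: $SERVER_ADDRESSES (replica set: $REPLICA_SET)"
```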
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
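+# Illustrative note (not part of the generated Airbyte spec): with the '{namespace}' and '{stream}' placeholders described above, a hypothetical topic_pattern such as airbyte/{namespace}/{stream} would resolve to airbyte/public/users for a stream named "users" in the "public" namespace, subject to the standard topic-name transformation mentioned in the description.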
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer. +datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages. +datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions. +datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name. +datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar. +datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms). +datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces. +datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and are spread across clusters. +datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk. +datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-rabbitmq.exchange.description=The exchange name. +datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name. +datasources.section.destination-rabbitmq.password.description=The password to connect. +datasources.section.destination-rabbitmq.port.description=The RabbitMQ port. +datasources.section.destination-rabbitmq.routing_key.description=The routing key. +datasources.section.destination-rabbitmq.ssl.description=SSL enabled. +datasources.section.destination-rabbitmq.username.description=The username to connect. +datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name. +datasources.section.destination-redis.cache_type.title=Cache type +datasources.section.destination-redis.host.title=Host +datasources.section.destination-redis.password.title=Password +datasources.section.destination-redis.port.title=Port +datasources.section.destination-redis.username.title=Username +datasources.section.destination-redis.cache_type.description=Redis cache type to store data in. +datasources.section.destination-redis.host.description=Redis host to connect to. +datasources.section.destination-redis.password.description=Password associated with Redis. +datasources.section.destination-redis.port.description=Port of Redis. 
+datasources.section.destination-redis.username.description=Username associated with Redis. +datasources.section.destination-redshift.database.title=Database +datasources.section.destination-redshift.host.title=Host +datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-redshift.password.title=Password +datasources.section.destination-redshift.port.title=Port +datasources.section.destination-redshift.schema.title=Default Schema +datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key +datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging +datasources.section.destination-redshift.uploading_method.title=Uploading Method +datasources.section.destination-redshift.username.title=Username +datasources.section.destination-redshift.database.description=Name of the database. +datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com) +datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-redshift.password.description=Password associated with the username. +datasources.section.destination-redshift.port.description=Port of the database. +datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-redshift.uploading_method.description=The method used to upload the data to the database. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key. 
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-rockset.api_key.title=Api Key +datasources.section.destination-rockset.api_server.title=Api Server +datasources.section.destination-rockset.workspace.title=Workspace +datasources.section.destination-rockset.api_key.description=Rockset api key +datasources.section.destination-rockset.api_server.description=Rockset api URL +datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created + written to. 
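+# Illustrative note (not part of the generated Airbyte spec): per the Redshift host description above, a hypothetical cluster endpoint would look like my-cluster.abc123xyz789.us-east-1.redshift.amazonaws.com (cluster-id, then region, ending in .redshift.amazonaws.com); the port and database name are supplied separately via the Port and Database fields.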
+datasources.section.destination-s3.access_key_id.title=S3 Key ID * +datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec * +datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage 
+datasources.section.destination-s3.format.title=Output Format * +datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional) +datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional) +datasources.section.destination-s3.secret_access_key.title=S3 Access Key * +datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here. +datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-s3.format.description=Format of the data output. See here for more details +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. 
This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here. +datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here +datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes. +datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint URL. Read more here +datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here +datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here +datasources.section.destination-scylla.address.title=Address +datasources.section.destination-scylla.keyspace.title=Keyspace +datasources.section.destination-scylla.password.title=Password +datasources.section.destination-scylla.port.title=Port +datasources.section.destination-scylla.replication.title=Replication factor +datasources.section.destination-scylla.username.title=Username +datasources.section.destination-scylla.address.description=Address to connect to. +datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in. +datasources.section.destination-scylla.password.description=Password associated with Scylla. +datasources.section.destination-scylla.port.description=Port of Scylla. +datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-scylla.username.description=Username to use to access Scylla. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. May be left empty; in that case a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
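+# Illustrative note (not part of the generated Airbyte spec): for the endpoint domain name above, the commonly used default for the public Azure cloud is blob.core.windows.net, so a hypothetical account "myaccount" would be reached at myaccount.blob.core.windows.net.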
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files After Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files After Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s). +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key ID to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key ID.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys.
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to log in to. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target Firestore database.
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is a waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for the dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database.
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3. +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this setting you can choose whether these records should be removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this setting you can choose whether these records should be removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s). +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id that grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave this empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys.
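+# Illustrative only: authenticationMethod is a oneOf field; a hypothetical
+# username/password variant of the Elasticsearch destination config could look like
+# the sketch below (placeholder values, exact oneOf discriminator not shown here):
+#   {
+#     "endpoint": "https://elasticsearch.example.internal:9200",
+#     "upsert": true,
+#     "authenticationMethod": { "username": "airbyte", "password": "****" }
+#   }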
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is a waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for the dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token used for obtaining a new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database.
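+# Illustrative only: a hypothetical value for the destination-jdbc jdbc_url field,
+# following the standard JDBC URL format referenced above (host, port and database
+# name are placeholders):
+#   jdbc:postgresql://db.example.internal:5432/airbyte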
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by commas. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
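+# Illustrative only: instance_type is a oneOf field; a hypothetical Replica Set
+# variant of the MongoDB destination config could look like the sketch below
+# (placeholder values, oneOf discriminator omitted):
+#   {
+#     "database": "airbyte",
+#     "auth_type": { "username": "airbyte", "password": "****" },
+#     "instance_type": {
+#       "server_addresses": "mongo1.example.internal:27017,mongo2.example.internal:27017",
+#       "replica_set": "rs0"
+#     }
+#   }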
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
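+# Illustrative only: the topic_pattern placeholders described above can be combined
+# into a single template; e.g. a hypothetical value of
+#   airbyte.{namespace}.{stream}
+# would route each record to a topic derived from its namespace and stream name
+# (after the name normalization mentioned in the description).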
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
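+# Illustrative only: encryption is a oneOf field; a hypothetical TLS-verified variant
+# of the Oracle destination config could look like the sketch below (placeholder
+# values, oneOf discriminator omitted):
+#   {
+#     "host": "oracle.example.internal",
+#     "port": 1521,
+#     "sid": "ORCL",
+#     "username": "airbyte",
+#     "password": "****",
+#     "schema": "airbyte",
+#     "encryption": { "ssl_certificate": "-----BEGIN CERTIFICATE----- ..." }
+#   }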
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys. 
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for the dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in the GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
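The Cassandra destination above is a flat set of connection parameters; a minimal config using only the key names listed above, with placeholder values, might look like the following sketch.

```
# Illustrative sketch only: minimal destination-cassandra config.
# Key names follow the cassandra properties above; all values are placeholders.
jq -n '{
  address: "cassandra.internal.example.com",
  port: 9042,
  datacenter: "datacenter1",
  keyspace: "airbyte",
  replication: 2,
  username: "airbyte",
  password: "changeme"
}'
```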
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database, formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where CSV files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs. +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the Delta Lake is built.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s). +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID that grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The secret corresponding to the above Access Key ID. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages. +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue. +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue. +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages. +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account ID. +datasources.section.destination-aws-datalake.bucket_name.description=Name of the S3 bucket. +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix. +datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials. +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3. +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key ID. +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=AWS User Secret Access Key. +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials. +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use. +datasources.section.destination-aws-datalake.region.description=Region name. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the default Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
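Similarly, the destination-azure-blob-storage fields above combine flat connection settings with a oneOf output format. A sketch with CSV output could look like the following; the format discriminator and flattening strings are assumptions, and all values are placeholders.

```
# Illustrative sketch only: destination-azure-blob-storage config with CSV output.
# Field names follow the properties above; format_type/flattening values are assumed.
jq -n '{
  azure_blob_storage_account_name: "myaccount",
  azure_blob_storage_account_key: "<storage-account-key>",
  azure_blob_storage_container_name: "airbytecontainer",
  azure_blob_storage_endpoint_domain_name: "blob.core.windows.net",
  azure_blob_storage_output_buffer_size: 5,
  format: {
    format_type: "CSV",
    flattening: "No flattening"
  }
}'
```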
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID that grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes.
+datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key ID used to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc ID. Does not support composite primary keys.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID used when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys.
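
The destination-elasticsearch strings above describe three mutually exclusive authentication methods: none, API key/secret, and username/password. Below is a hedged sketch of the three corresponding config shapes, using the field names from these keys; the "method" discriminator and its values are assumptions.

```python
# Hypothetical Elasticsearch destination configs, one per authentication method
# described above. Field names come from the property keys; the "method"
# discriminator and its values are assumptions for illustration.
es_no_auth = {
    "endpoint": "https://es.example.com:9200",
    "upsert": True,  # upsert on the primary key when the source defines one
    "authenticationMethod": {"method": "none"},
}
es_api_key = {
    "endpoint": "https://es.example.com:9200",
    "authenticationMethod": {
        "method": "secret",
        "apiKeyId": "<API Key ID>",
        "apiKeySecret": "<API Key Secret>",
    },
}
es_basic_auth = {
    "endpoint": "https://es.example.com:9200",
    "authenticationMethod": {
        "method": "basic",
        "username": "<username>",
        "password": "<password>",
    },
}
```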
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=GCS Bucket Path string Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted url. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
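
The destination-jdbc and destination-clickhouse strings above both describe extra JDBC parameters formatted as 'key=value' pairs joined with '&'. Below is a small sketch of assembling such a URL; the host, database, and parameter names are illustrative placeholders, not values from this file.

```python
# Build a JDBC URL with extra parameters formatted as 'key=value' pairs
# separated by '&', as described for jdbc_url_params above.
base_url = "jdbc:clickhouse://db.example.com:8123/airbyte"   # placeholder host/database
jdbc_url_params = {"socket_timeout": "300000", "ssl": "true"}  # placeholder parameters

query_string = "&".join(f"{key}={value}" for key, value in jdbc_url_params.items())
jdbc_url = f"{base_url}?{query_string}" if query_string else base_url
print(jdbc_url)
# jdbc:clickhouse://db.example.com:8123/airbyte?socket_timeout=300000&ssl=true
```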
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
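
The destination-kafka topic_pattern description above says records can be routed using '{namespace}' and '{stream}' placeholders, and that the resulting topic name is transformed to a standard naming convention. Below is a rough sketch of that substitution; the normalization rule shown is an assumption, not taken from these strings.

```python
import re

def render_topic(topic_pattern: str, namespace: str, stream: str) -> str:
    """Substitute the '{namespace}' and '{stream}' placeholders described above,
    then normalize the result. The normalization shown (lowercasing and collapsing
    characters outside A-Z, a-z, 0-9, '.', '_', '-' to underscores) is an
    assumption for illustration only."""
    topic = topic_pattern.replace("{namespace}", namespace).replace("{stream}", stream)
    return re.sub(r"[^A-Za-z0-9._-]+", "_", topic).lower()

print(render_topic("airbyte.{namespace}.{stream}", "public", "Users"))
# airbyte.public.users
```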
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
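
The destination-amazon-sqs message_body_key description above says that when the key is set, only that part of the input record becomes the SQS message body; otherwise the whole record is sent. Below is a minimal sketch of that selection logic, purely for illustration and not the connector's actual code.

```python
import json
from typing import Optional

def build_sqs_message_body(record: dict, message_body_key: Optional[str]) -> str:
    """Use the named key of the record as the message body when message_body_key
    is set; otherwise use the entire record, as described above."""
    payload = record[message_body_key] if message_body_key else record
    return payload if isinstance(payload, str) else json.dumps(payload)

record = {"id": 42, "data": {"event": "signup", "user": "ada"}}
print(build_sqs_message_body(record, "data"))  # {"event": "signup", "user": "ada"}
print(build_sqs_message_body(record, None))    # the whole record as JSON
```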
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the Microsoft native one. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
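
Both the destination-gcs and destination-azure-blob-storage format strings above mention optional normalization (flattening) of the input JSON before writing CSV. Below is a generic sketch of the idea; the recursive dotted column naming is an assumption for illustration, not the connector's actual scheme.

```python
def flatten(record: dict, parent: str = "", sep: str = ".") -> dict:
    """Illustrative flattening of nested objects into dotted column names.
    This is a generic sketch of the idea referenced by the flattening options
    above, not the connector's actual normalization logic."""
    flat = {}
    for key, value in record.items():
        column = f"{parent}{sep}{key}" if parent else key
        if isinstance(value, dict):
            flat.update(flatten(value, column, sep))
        else:
            flat[column] = value
    return flat

print(flatten({"id": 7, "profile": {"name": "Ada", "plan": "pro"}}))
# {'id': 7, 'profile.name': 'Ada', 'profile.plan': 'pro'}
```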
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting allow one to access the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Default to 'true'. Switch it to 'false' for debugging purpose. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the Microsoft native endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This impacts the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
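The destination-aws-datalake keys above describe a oneOf `credentials` object with two modes, IAM Role and IAM User. A minimal sketch of the two JSON shapes, using only the field names that appear in the keys above (values, and any discriminator string the real spec.json uses, are placeholders):

```
# Illustrative only: field names come from the property keys above; values are placeholders.
cat <<'EOF' | jq .
{
  "iam_role_variant": {
    "credentials_title": "IAM Role",
    "role_arn": "arn:aws:iam::123456789012:role/example-datalake-role"
  },
  "iam_user_variant": {
    "credentials_title": "IAM User",
    "aws_access_key_id": "AKIAEXAMPLE",
    "aws_secret_access_key": "exampleSecretKey"
  }
}
EOF
```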
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this setting you can choose whether those records should be removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
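The loading_method keys above describe a choice between Standard Inserts and GCS Staging, where the GCS Staging variant additionally nests an HMAC key credential. A minimal sketch of what a GCS Staging configuration could look like, using only the field names from the keys above (values, nesting of the discriminator, and the selector strings are assumptions, not verified against the connector's spec.json):

```
# Sketch of a GCS Staging loading_method; field names taken from the keys above, values are placeholders.
cat <<'EOF' | jq .
{
  "project_id": "my-gcp-project",
  "dataset_id": "airbyte_dataset",
  "dataset_location": "US",
  "loading_method": {
    "gcs_bucket_name": "my-staging-bucket",
    "gcs_bucket_path": "airbyte/staging",
    "credential": {
      "hmac_key_access_id": "GOOGEXAMPLEACCESSID",
      "hmac_key_secret": "exampleBase64EncodedSecret"
    }
  }
}
EOF
```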
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this setting you can choose whether those records should be removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key ID to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key ID. 
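The destination-databricks keys above model the staging `data_source` as a oneOf whose Amazon S3 variant carries the bucket and key fields. A minimal sketch under those field names (values are placeholders and the discriminator field the real spec.json uses is omitted here):

```
# Sketch of the Amazon S3 data_source variant; field names from the keys above, values are placeholders.
cat <<'EOF' | jq .
{
  "databricks_server_hostname": "example-workspace.cloud.databricks.com",
  "databricks_http_path": "sql/protocolv1/o/0/example-path",
  "databricks_personal_access_token": "examplePersonalAccessToken",
  "data_source": {
    "s3_bucket_name": "my-staging-bucket",
    "s3_bucket_path": "databricks/staging",
    "s3_bucket_region": "us-east-1",
    "s3_access_key_id": "AKIAEXAMPLE",
    "s3_secret_access_key": "exampleSecretKey"
  }
}
EOF
```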
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc ID. Does not support composite primary keys. 
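The destination-elasticsearch authenticationMethod keys above enumerate three variants: no auth, API key/secret, and username/password. A minimal sketch of the two credentialed shapes, using the field names from the keys above (values are placeholders and the selector value the spec.json uses is an assumption):

```
# Sketch of the two credentialed authenticationMethod variants; field names from the keys above.
cat <<'EOF' | jq .
{
  "api_key_variant": {
    "endpoint": "https://elasticsearch.example.com:9200",
    "authenticationMethod": { "apiKeyId": "exampleKeyId", "apiKeySecret": "exampleKeySecret" }
  },
  "basic_auth_variant": {
    "endpoint": "https://elasticsearch.example.com:9200",
    "authenticationMethod": { "username": "airbyte", "password": "examplePassword" },
    "upsert": true
  }
}
EOF
```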
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
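destination-firebolt likewise offers two loading methods: plain SQL Inserts, or an External Table via S3 that needs the AWS key and bucket fields listed above. A minimal sketch of the S3 variant with those field names (all values are placeholders, and the method selector is left out since it is not named in the keys above):

```
# Sketch of the "External Table via S3" loading_method; field names from the keys above.
cat <<'EOF' | jq .
{
  "username": "user@example.com",
  "password": "examplePassword",
  "database": "my_database",
  "loading_method": {
    "s3_bucket": "my-firebolt-staging",
    "s3_region": "us-east-1",
    "aws_key_id": "AKIAEXAMPLE",
    "aws_key_secret": "exampleSecretKey"
  }
}
EOF
```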
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=GCS Bucket Path string Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted url. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
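The destination-jdbc keys above (and the jdbc_url_params keys for ClickHouse, MSSQL, MySQL and Oracle elsewhere in this file) expect a JDBC-formatted URL plus optional 'key=value' pairs joined by '&'. A small sketch of how such a URL could be assembled; the driver prefix, host and parameter names are placeholders, while the 'key1=value1&key2=value2&key3=value3' format is the one quoted in the descriptions:

```
# Placeholder values only; shows the '&'-separated params format described above.
JDBC_URL_PARAMS='key1=value1&key2=value2&key3=value3'
JDBC_URL="jdbc:postgresql://db.example.com:5432/mydb?${JDBC_URL_PARAMS}"
echo "$JDBC_URL"
```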
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDB Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDB Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
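The destination-mongodb instance_type keys above distinguish a standalone instance (host/port/tls), a replica set (server_addresses/replica_set) and MongoDB Atlas (cluster_url). A minimal sketch of the three shapes using those field names (values are placeholders; any selector field the real spec.json adds is not shown):

```
# Sketch of the three instance_type variants; field names from the keys above, values are placeholders.
cat <<'EOF' | jq .
{
  "standalone": { "host": "mongo.example.com", "port": 27017, "tls": true },
  "replica_set": { "server_addresses": "mongo1.example.com:27017,mongo2.example.com:27017", "replica_set": "rs0" },
  "atlas": { "cluster_url": "mongodb+srv://cluster0.example.mongodb.net" }
}
EOF
```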
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
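Both the Kafka and MQTT topic_pattern descriptions above mention '{namespace}' and '{stream}' placeholders that are resolved per record. A small illustration of how such a pattern expands; the substitution below is only a sketch, and the descriptions note that the connectors additionally normalize the resulting topic name:

```
# Illustrative expansion of the '{namespace}' and '{stream}' placeholders; example values only.
NAMESPACE="public"
STREAM="users"
PATTERN='airbyte.{namespace}.{stream}'
echo "$PATTERN" | sed -e "s/{namespace}/$NAMESPACE/" -e "s/{stream}/$STREAM/"
# prints: airbyte.public.users
```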
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account id
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If it does not exist, it will be created automatically. May be left empty, in which case a container named airbytecontainer+timestamp is created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the Microsoft native one from the example.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. 
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. 
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. 
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure Blob Storage container (bucket) name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This affects the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to the docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this setting you can choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don't count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method *
+datasources.section.destination-bigquery-denormalized.project_id.title=Project ID
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this setting you can choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-cassandra.address.title=Address
+datasources.section.destination-cassandra.datacenter.title=Datacenter
+datasources.section.destination-cassandra.keyspace.title=Keyspace
+datasources.section.destination-cassandra.password.title=Password
+datasources.section.destination-cassandra.port.title=Port
+datasources.section.destination-cassandra.replication.title=Replication factor
+datasources.section.destination-cassandra.username.title=Username
+datasources.section.destination-cassandra.address.description=Address to connect to.
+datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster.
+datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in.
+datasources.section.destination-cassandra.password.description=Password associated with Cassandra.
+datasources.section.destination-cassandra.port.description=Port of Cassandra.
+datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-clickhouse.database.title=DB Name
+datasources.section.destination-clickhouse.host.title=Host
+datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-clickhouse.password.title=Password
+datasources.section.destination-clickhouse.port.title=Port
+datasources.section.destination-clickhouse.ssl.title=SSL Connection
+datasources.section.destination-clickhouse.tcp-port.title=Native Port
+datasources.section.destination-clickhouse.username.title=User
+datasources.section.destination-clickhouse.database.description=Name of the database.
+datasources.section.destination-clickhouse.host.description=Hostname of the database.
+datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database, formatted as 'key=value' pairs separated by the symbol '&' (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-clickhouse.password.description=Password associated with the username.
+datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database.
+datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL.
+datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC port) of the database.
+datasources.section.destination-clickhouse.username.description=Username to use to access the database.
+datasources.section.destination-csv.destination_path.description=Path to the directory where CSV files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information, check out our docs.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty when running the container from the command line) to use the Microsoft native endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting allow one to access the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Default to 'true'. Switch it to 'false' for debugging purpose. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint url.(if you are working with AWS DynamoDB, just leave empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use a api key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to used when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full url of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the elasticsearch doc id. Does not support composite primary keys. 
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=GCS Bucket Path string Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted url. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3. +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to the docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
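Taken together, the destination-bigquery keys above describe a config whose shape is roughly the sketch below. This is an assumption pieced together from the property names in this file (the oneOf discriminator field, the credential type field, and the exact nesting are not spelled out here), not the authoritative spec:

```python
# Hypothetical destination-bigquery config using the "GCS Staging" loading method.
# Field names come from the keys documented above; "method" and "credential_type"
# are assumptions, and all values are placeholders.
import json

bigquery_destination_config = {
    "project_id": "my-gcp-project",
    "dataset_id": "airbyte_sync",
    "dataset_location": "US",
    "credentials_json": "<service account key JSON>",
    "transformation_priority": "interactive",
    "loading_method": {
        "method": "GCS Staging",            # vs. "Standard Inserts"
        "gcs_bucket_name": "my-staging-bucket",
        "gcs_bucket_path": "airbyte/staging",
        "keep_files_in_gcs-bucket": "Delete all tmp files from GCS",
        "credential": {                      # HMAC key credential
            "credential_type": "HMAC_KEY",
            "hmac_key_access_id": "<61- or 24-character access ID>",
            "hmac_key_secret": "<40-character base64 secret>",
        },
    },
}

print(json.dumps(bigquery_destination_config, indent=2))
```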
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s). +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key ID used to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB instance. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key ID. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys. 
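The destination-elasticsearch upsert behaviour described above (the primary key value reused as the doc id, so re-synced records overwrite rather than duplicate) boils down to indexing each record under a deterministic document ID. A minimal sketch of that idea against the Elasticsearch REST API, using a placeholder endpoint, index name, and credentials (none of which come from this file):

```python
# Minimal sketch of id-based upserts: PUT /<index>/_doc/<id> creates the document
# on the first sync and replaces it on subsequent syncs with the same primary key.
import requests

ENDPOINT = "https://elasticsearch.example.com:9200"  # placeholder Server Endpoint
INDEX = "airbyte_records"                            # placeholder index name

def upsert_record(record: dict, primary_key: str) -> None:
    doc_id = str(record[primary_key])  # composite primary keys are not supported
    resp = requests.put(
        f"{ENDPOINT}/{INDEX}/_doc/{doc_id}",
        json=record,
        auth=("username", "password"),  # Username/Password authentication method
        timeout=30,
    )
    resp.raise_for_status()

upsert_record({"id": 42, "name": "example"}, primary_key="id")
```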
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to log in to. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS key. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in. 
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to log in to. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target Firestore database. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is a waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for the dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDB Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDB Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member separated by a comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. May be left empty; in that case a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files After Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO to load the file into the table. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
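+# Hypothetical example (placeholder names, not defaults): a destination-bigquery configuration
+# using the GCS Staging loading method described above.
+#   project_id       = my-gcp-project
+#   dataset_id       = airbyte_raw
+#   dataset_location = US
+#   loading_method   = GCS Staging
+#     gcs_bucket_name = my-staging-bucket
+#     gcs_bucket_path = airbyte/staging
+#     credential      = HMAC key (hmac_key_access_id + hmac_key_secret)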
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files After Processing (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method *
+datasources.section.destination-bigquery-denormalized.project_id.title=Project ID
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.description=Select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO to load the file into the table. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
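+# Note (assumed from the key names above, not from any generator documentation): the dotted path
+# after the connector name mirrors the connector's JSON schema, so
+# credentials.oneOf.1.properties.aws_access_key_id.title is the title of the aws_access_key_id
+# field inside the second ("IAM User") credentials option, and
+# loading_method.oneOf.1.properties.gcs_bucket_name.title addresses the GCS bucket name field of
+# the "GCS Staging" loading method.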
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. 
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. 
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). 
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. 
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty when running the container from the command line) to use the native Microsoft endpoint.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method *
+datasources.section.destination-bigquery-denormalized.project_id.title=Project ID
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-cassandra.address.title=Address
+datasources.section.destination-cassandra.datacenter.title=Datacenter
+datasources.section.destination-cassandra.keyspace.title=Keyspace
+datasources.section.destination-cassandra.password.title=Password
+datasources.section.destination-cassandra.port.title=Port
+datasources.section.destination-cassandra.replication.title=Replication factor
+datasources.section.destination-cassandra.username.title=Username
+datasources.section.destination-cassandra.address.description=Address to connect to.
+datasources.section.destination-cassandra.datacenter.description=Datacenter of the cassandra cluster.
+datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in.
+datasources.section.destination-cassandra.password.description=Password associated with Cassandra.
+datasources.section.destination-cassandra.port.description=Port of Cassandra.
+datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-clickhouse.database.title=DB Name
+datasources.section.destination-clickhouse.host.title=Host
+datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-clickhouse.password.title=Password
+datasources.section.destination-clickhouse.port.title=Port
+datasources.section.destination-clickhouse.ssl.title=SSL Connection
+datasources.section.destination-clickhouse.tcp-port.title=Native Port
+datasources.section.destination-clickhouse.username.title=User
+datasources.section.destination-clickhouse.database.description=Name of the database.
+datasources.section.destination-clickhouse.host.description=Hostname of the database.
+datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-clickhouse.password.description=Password associated with the username.
+datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database.
+datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL.
+datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database.
+datasources.section.destination-clickhouse.username.description=Username to use to access the database.
+datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key
+datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3
+datasources.section.destination-databricks.data_source.title=Data Source
+datasources.section.destination-databricks.database_schema.title=Database Schema
+datasources.section.destination-databricks.databricks_http_path.title=HTTP Path
+datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token
+datasources.section.destination-databricks.databricks_port.title=Port
+datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname
+datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector.
+datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting allow one to access the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Default to 'true'. Switch it to 'false' for debugging purpose. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint url.(if you are working with AWS DynamoDB, just leave empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use a api key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to used when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full url of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the elasticsearch doc id. Does not support composite primary keys. 
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=GCS Bucket Path string Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted url. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member seperated by comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. Use this option to choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method *
+datasources.section.destination-bigquery-denormalized.project_id.title=Project ID
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in the GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-cassandra.address.title=Address
+datasources.section.destination-cassandra.datacenter.title=Datacenter
+datasources.section.destination-cassandra.keyspace.title=Keyspace
+datasources.section.destination-cassandra.password.title=Password
+datasources.section.destination-cassandra.port.title=Port
+datasources.section.destination-cassandra.replication.title=Replication factor
+datasources.section.destination-cassandra.username.title=Username
+datasources.section.destination-cassandra.address.description=Address to connect to.
+datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster.
+datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in.
+datasources.section.destination-cassandra.password.description=Password associated with Cassandra.
+datasources.section.destination-cassandra.port.description=Port of Cassandra.
+datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-clickhouse.database.title=DB Name
+datasources.section.destination-clickhouse.host.title=Host
+datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-clickhouse.password.title=Password
+datasources.section.destination-clickhouse.port.title=Port
+datasources.section.destination-clickhouse.ssl.title=SSL Connection
+datasources.section.destination-clickhouse.tcp-port.title=Native Port
+datasources.section.destination-clickhouse.username.title=User
+datasources.section.destination-clickhouse.database.description=Name of the database.
+datasources.section.destination-clickhouse.host.description=Hostname of the database.
+datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database, formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-clickhouse.password.description=Password associated with the username.
+datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database.
+datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL.
+datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC port) of the database.
+datasources.section.destination-clickhouse.username.description=Username to use to access the database.
+datasources.section.destination-csv.destination_path.description=Path to the directory where CSV files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs.
+datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key
+datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3
+datasources.section.destination-databricks.data_source.title=Data Source
+datasources.section.destination-databricks.database_schema.title=Database Schema
+datasources.section.destination-databricks.databricks_http_path.title=HTTP Path
+datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token
+datasources.section.destination-databricks.databricks_port.title=Port
+datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname
+datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector.
+datasources.section.destination-databricks.data_source.description=Storage on which the Delta Lake is built.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s).
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermediate staging of the data.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy.
+datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key ID.
+datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path.
+datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests.
+datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port.
+datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname.
+datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes.
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages.
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue.
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue.
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages.
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account ID.
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket.
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix.
+datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key ID.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials.
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use.
+datasources.section.destination-aws-datalake.region.description=Region name.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint from the example.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format.
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to the docs for details.
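Note on the key layout: each entry is `datasources.section.<connector>.<JSON path inside connectionSpecification.properties>.title|description`, e.g. `loading_method.oneOf.1.properties.gcs_bucket_name`. The jq sketch below is illustrative only and is not the repo's actual generator: it assumes a hypothetical `spec2properties.jq` file and a `--arg connector` name, and simply shows how title/description lines in this shape could be derived from a connector's spec.json.

```
# spec2properties.jq (hypothetical, illustrative sketch)
# usage: jq -r --arg connector destination-bigquery -f spec2properties.jq spec.json
.connectionSpecification.properties
# paths of every object node that carries a title or description
| paths(objects | has("title") or has("description")) as $p
| getpath($p) as $node
| ($p | join(".")) as $path
| "datasources.section.\($connector).\($path)" as $key
| (select($node.title != null)       | "\($key).title=\($node.title)"),
  (select($node.description != null) | "\($key).description=\($node.description)")
```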
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to 
Authenticate to AWS. +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.&#10;
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether those records should be removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. &#10;
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.&#10;
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether those records should be removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. &#10;
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key ID. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key ID to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key ID. &#10;
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc ID. Does not support composite primary keys. &#10;
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to log in to. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. &#10;
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is a waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. &#10;
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for the dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. &#10;
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
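+# Editor's note (not generated from a connector spec): a hedged illustration of the topic_pattern
+# placeholders described above for the MQTT and Kafka destinations. With a hypothetical pattern such as
+#   airbyte.{namespace}.{stream}
+# a record from namespace 'public' and stream 'users' would be published to a topic resembling
+# 'airbyte.public.users', after the topic name is transformed to the standard naming convention.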
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer. +datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages. +datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions. +datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name. +datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar. +datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms). +datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces. +datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters. +datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk. +datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-rabbitmq.exchange.description=The exchange name. +datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name. +datasources.section.destination-rabbitmq.password.description=The password to connect. +datasources.section.destination-rabbitmq.port.description=The RabbitMQ port. +datasources.section.destination-rabbitmq.routing_key.description=The routing key. +datasources.section.destination-rabbitmq.ssl.description=SSL enabled. +datasources.section.destination-rabbitmq.username.description=The username to connect. +datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name. +datasources.section.destination-redis.cache_type.title=Cache type +datasources.section.destination-redis.host.title=Host +datasources.section.destination-redis.password.title=Password +datasources.section.destination-redis.port.title=Port +datasources.section.destination-redis.username.title=Username +datasources.section.destination-redis.cache_type.description=Redis cache type to store data in. +datasources.section.destination-redis.host.description=Redis host to connect to. +datasources.section.destination-redis.password.description=Password associated with Redis. +datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis. +datasources.section.destination-redshift.database.title=Database +datasources.section.destination-redshift.host.title=Host +datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-redshift.password.title=Password +datasources.section.destination-redshift.port.title=Port +datasources.section.destination-redshift.schema.title=Default Schema +datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key +datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging +datasources.section.destination-redshift.uploading_method.title=Uploading Method +datasources.section.destination-redshift.username.title=Username +datasources.section.destination-redshift.database.description=Name of the database. +datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com) +datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-redshift.password.description=Password associated with the username. +datasources.section.destination-redshift.port.description=Port of the database. +datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-redshift.uploading_method.description=The method how the data will be uploaded to the database. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key. 
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-rockset.api_key.title=Api Key +datasources.section.destination-rockset.api_server.title=Api Server +datasources.section.destination-rockset.workspace.title=Workspace +datasources.section.destination-rockset.api_key.description=Rockset api key +datasources.section.destination-rockset.api_server.description=Rockset api URL +datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created + written to. 
+datasources.section.destination-s3.access_key_id.title=S3 Key ID * +datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec * +datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage 
+datasources.section.destination-s3.format.title=Output Format * +datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional) +datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional) +datasources.section.destination-s3.secret_access_key.title=S3 Access Key * +datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here. +datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-s3.format.description=Format of the data output. See here for more details +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. 
This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here. +datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here +datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes. +datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here +datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here +datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to
Authenticate to AWS. +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method *
+datasources.section.destination-bigquery-denormalized.project_id.title=Project ID
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-cassandra.address.title=Address
+datasources.section.destination-cassandra.datacenter.title=Datacenter
+datasources.section.destination-cassandra.keyspace.title=Keyspace
+datasources.section.destination-cassandra.password.title=Password
+datasources.section.destination-cassandra.port.title=Port
+datasources.section.destination-cassandra.replication.title=Replication factor
+datasources.section.destination-cassandra.username.title=Username
+datasources.section.destination-cassandra.address.description=Address to connect to.
+datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster.
+datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in.
+datasources.section.destination-cassandra.password.description=Password associated with Cassandra.
+datasources.section.destination-cassandra.port.description=Port of Cassandra.
+datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to 
Authenticate to AWS. +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. 
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. 
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this setting you can choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards the concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don't count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
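+# Example (hypothetical): a minimal spec.json fragment that would produce the
+# destination-bigquery loading_method keys above, assuming each
+# datasources.section.<connector>.<path> key mirrors the connector's
+# connectionSpecification. The structure is shown for illustration only and the
+# exact generator mapping is an assumption, not part of this file:
+# {
+#   "connectionSpecification": {
+#     "properties": {
+#       "loading_method": {
+#         "title": "Loading Method",
+#         "description": "The loading method that selects how data will be uploaded to BigQuery.",
+#         "oneOf": [
+#           { "title": "Standard Inserts" },
+#           {
+#             "title": "GCS Staging",
+#             "properties": {
+#               "gcs_bucket_name": { "title": "GCS Bucket Name" },
+#               "gcs_bucket_path": { "title": "GCS Bucket Path" }
+#             }
+#           }
+#         ]
+#       }
+#     }
+#   }
+# }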
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Post-Processing (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method *
+datasources.section.destination-bigquery-denormalized.project_id.title=Project ID
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. This is the size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.description=The loading method that selects how data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this setting you can choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=The target AWS account ID
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. 
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. 
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. 
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account ID
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key ID
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure Blob Storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty when running the container from the command line) to use the native Microsoft endpoint.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Select the way data will be uploaded to BigQuery. Standard Inserts - direct uploading using SQL INSERT statements; this method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to load the file; recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base64-encoded string.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. Use this option to choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don't count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). 
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. This option lets you choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don't count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method *
+datasources.section.destination-bigquery-denormalized.project_id.title=Project ID
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.description=Select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. This option lets you choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
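Each key above follows the pattern `datasources.section.<connector>.<field path>.title|description`, where the field path (including `oneOf.N` and `properties` segments) mirrors the nesting of the connector's JSON schema. As a quick sanity check you can pull the stored entries for a single field straight out of the generated properties file; the snippet below is only an illustrative sketch, and the file path is an assumption to adjust to wherever these keys are actually written.

```
# Illustrative lookup only; PROPS is an assumed path, not the file added in this diff header.
PROPS="jvm/src/main/resources/datasources.properties"
KEY="datasources.section.destination-bigquery.dataset_id"

# Print the stored title and description for one BigQuery field
grep -E "^${KEY}\.(title|description)=" "$PROPS"
```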
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. 
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to 
Authenticate to AWS. +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. This setting lets you choose whether those records are removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. This setting lets you choose whether those records are removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s). +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key ID. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key ID used to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key ID.
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys.
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=The Firebolt account to log in to. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=The Firebolt email address you use to log in. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target Firestore database.
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is a waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here.
+datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into.
+datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here.
+datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID
+datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret
+datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token
+datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth)
+datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link
+datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs
+datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application.
+datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application.
+datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining a new access token.
+datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details.
+datasources.section.destination-jdbc.jdbc_url.title=JDBC URL
+datasources.section.destination-jdbc.password.title=Password
+datasources.section.destination-jdbc.schema.title=Default Schema
+datasources.section.destination-jdbc.username.title=Username
+datasources.section.destination-jdbc.jdbc_url.description=JDBC-formatted URL. See the standard here.
+datasources.section.destination-jdbc.password.description=The password associated with this username.
+datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public".
+datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
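The destination-google-sheets credentials above (Client ID, Client Secret, Refresh Token) follow the standard Google OAuth refresh flow: the long-lived refresh token is exchanged for a short-lived access token at Google's token endpoint. A minimal Scala sketch of that exchange, using only the JDK's java.net.http client and hypothetical placeholder values:

```scala
import java.net.URI
import java.net.http.{HttpClient, HttpRequest, HttpResponse}

object RefreshGoogleAccessToken {
  def main(args: Array[String]): Unit = {
    // Hypothetical placeholders; in the connector these come from the credentials object above.
    val clientId     = "YOUR_CLIENT_ID"
    val clientSecret = "YOUR_CLIENT_SECRET"
    val refreshToken = "YOUR_REFRESH_TOKEN"

    // grant_type=refresh_token exchanges the refresh token for a new access token.
    val form = Seq(
      "client_id"     -> clientId,
      "client_secret" -> clientSecret,
      "refresh_token" -> refreshToken,
      "grant_type"    -> "refresh_token"
    ).map { case (k, v) => s"$k=${java.net.URLEncoder.encode(v, "UTF-8")}" }.mkString("&")

    val request = HttpRequest.newBuilder()
      .uri(URI.create("https://oauth2.googleapis.com/token"))
      .header("Content-Type", "application/x-www-form-urlencoded")
      .POST(HttpRequest.BodyPublishers.ofString(form))
      .build()

    val response = HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString())
    println(response.body()) // JSON containing access_token and expires_in on success
  }
}
```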
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
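The destination-kafka fields above map directly onto standard Kafka producer settings. A minimal sketch, assuming the org.apache.kafka:kafka-clients library and illustrative broker addresses, of how a few of those fields would be applied:

```scala
import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

object KafkaDestinationSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    // "Bootstrap Servers": host/port pairs used only for initial cluster discovery.
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092,broker2:9092")
    // "ACKs", "Batch Size", "Linger ms", "Compression Type", "Enable Idempotence", "Client ID".
    props.put(ProducerConfig.ACKS_CONFIG, "all")
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384")
    props.put(ProducerConfig.LINGER_MS_CONFIG, "5")
    props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip")
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
    props.put(ProducerConfig.CLIENT_ID_CONFIG, "airbyte-sketch")
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)

    val producer = new KafkaProducer[String, String](props)
    // Blocking on get() corresponds to the "Sync Producer" option above.
    producer.send(new ProducerRecord[String, String]("test_topic", "key", "value")).get()
    producer.close()
  }
}
```

Leaving the returned future unawaited is the asynchronous default; blocking on it is what the "Sync Producer" toggle describes.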
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDB Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDB Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
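The three destination-mongodb instance types above differ mainly in the connection string they produce. A small illustrative sketch (hypothetical hosts and credentials, no driver dependency) of the three URI shapes:

```scala
object MongoUriSketch {
  def main(args: Array[String]): Unit = {
    val user = "airbyte"
    val pass = "secret"
    val db   = "mydb"

    // Standalone MongoDB Instance: a single host/port.
    val standalone = s"mongodb://$user:$pass@mongo.example.com:27017/$db"

    // Replica Set: comma-separated host:port members plus the replica set name.
    val replicaSet = s"mongodb://$user:$pass@m1.example.com:27017,m2.example.com:27017/$db?replicaSet=rs0"

    // MongoDB Atlas: DNS seed-list (mongodb+srv) cluster URL; TLS is on by default.
    val atlas = s"mongodb+srv://$user:$pass@cluster0.example.mongodb.net/$db"

    Seq(standalone, replicaSet, atlas).foreach(println)
  }
}
```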
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
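Several of the destinations above (MSSQL, MySQL, Oracle, and the Postgres/Redshift/Snowflake entries that follow) accept a "JDBC URL Params" field of 'key=value' pairs joined by '&'. A minimal Scala sketch, assuming a MySQL JDBC driver on the classpath and hypothetical host, credentials, and parameter names, of how that suffix is appended before connecting:

```scala
import java.sql.DriverManager

object JdbcUrlParamsSketch {
  def main(args: Array[String]): Unit = {
    val baseUrl       = "jdbc:mysql://db.example.com:3306/mydb"  // hypothetical host/database
    val jdbcUrlParams = "connectTimeout=5000&useSSL=true"        // the 'key=value&key=value' field

    // Append the extra properties as described: '?' followed by the '&'-separated pairs.
    val url = if (jdbcUrlParams.nonEmpty) s"$baseUrl?$jdbcUrlParams" else baseUrl

    val connection = DriverManager.getConnection(url, "user", "password")
    try println(s"Connected: ${!connection.isClosed}")
    finally connection.close()
  }
}
```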
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and are spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis. 
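The '{namespace}' and '{stream}' placeholders mentioned in the Kafka, MQTT, and Pulsar topic-pattern descriptions are plain per-record substitutions. A tiny illustrative sketch (a hypothetical helper, not the connectors' actual code):

```scala
object TopicPatternSketch {
  /** Expand '{namespace}' and '{stream}' placeholders in a topic pattern. */
  def expand(pattern: String, namespace: String, stream: String): String =
    pattern
      .replace("{namespace}", namespace)
      .replace("{stream}", stream)

  def main(args: Array[String]): Unit = {
    // e.g. a record from namespace "public", stream "users"
    println(expand("airbyte.{namespace}.{stream}", "public", "users")) // airbyte.public.users
  }
}
```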
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key ID
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method by which the data will be uploaded to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key. 
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key ID. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
+datasources.section.destination-rockset.api_key.title=API Key
+datasources.section.destination-rockset.api_server.title=API Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset API key
+datasources.section.destination-rockset.api_server.description=Rockset API URL
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to. 
+datasources.section.destination-s3.access_key_id.title=S3 Key ID * +datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec * +datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage 
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint URL. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-sftp-json.destination_path.title=Destination path
+datasources.section.destination-sftp-json.host.title=Host
+datasources.section.destination-sftp-json.password.title=Password
+datasources.section.destination-sftp-json.port.title=Port
+datasources.section.destination-sftp-json.username.title=User
+datasources.section.destination-sftp-json.destination_path.description=Path to the directory where JSON files will be written.
+datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server.
+datasources.section.destination-sftp-json.password.description=Password associated with the username.
+datasources.section.destination-sftp-json.port.description=Port of the SFTP server.
+datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server. 
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional) +datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password +datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password +datasources.section.destination-snowflake.credentials.title=Authorization Method +datasources.section.destination-snowflake.database.title=Database +datasources.section.destination-snowflake.host.title=Host +datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging +datasources.section.destination-snowflake.loading_method.title=Data Staging Method +datasources.section.destination-snowflake.role.title=Role +datasources.section.destination-snowflake.schema.title=Default Schema +datasources.section.destination-snowflake.username.title=Username +datasources.section.destination-snowflake.warehouse.title=Warehouse +datasources.section.destination-snowflake.credentials.description= +datasources.section.destination-snowflake.credentials.description= +datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter you application's Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key. +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username. +datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into +datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format ...snowflakecomputing.com) +datasources.section.destination-snowflake.jdbc_url_params.description=Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &). 
Example: key1=value1&key2=value2&key3=value3 +datasources.section.destination-snowflake.loading_method.description=Select a data staging method +datasources.section.destination-snowflake.loading_method.oneOf.0.description=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.description=Enter your AWS access key ID. Airbyte requires Read and Write permissions on your S3 bucket +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.description=Choose a data encryption method for the staging data +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.description=Toggle to delete staging files from the S3 bucket after a successful sync +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.description=Enter your S3 bucket name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.description=Enter the region where your S3 bucket resides +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.description=Enter your AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.3.description=Recommended for large production workloads for better speed and scalability. 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.description=Enter the Cloud Storage bucket name +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.description=Enter your Google Cloud service account key in the JSON format with read/write access to your Cloud Storage staging bucket +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.description=Enter the Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.4.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.description=Enter your Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.description=Enter your Azure Blob Storage container name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.description=Enter the Azure Blob Storage endpoint domain name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.description=Enter the Shared access signature (SAS) token to grant Snowflake limited access to objects in your Azure Blob Storage account +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.role.description=Enter the role that you want to use to access Snowflake +datasources.section.destination-snowflake.schema.description=Enter the name of the default schema +datasources.section.destination-snowflake.username.description=Enter the name of the user you want to use to access the database +datasources.section.destination-snowflake.warehouse.description=Enter the name of the warehouse that you want to sync data into +datasources.section.destination-sqlite.destination_path.description=Path to the sqlite.db file. The file will be placed inside that local mount. For more information check out our docs +datasources.section.destination-tidb.database.title=Database +datasources.section.destination-tidb.host.title=Host +datasources.section.destination-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-tidb.password.title=Password +datasources.section.destination-tidb.port.title=Port +datasources.section.destination-tidb.ssl.title=SSL Connection +datasources.section.destination-tidb.username.title=User +datasources.section.destination-tidb.database.description=Name of the database. +datasources.section.destination-tidb.host.description=Hostname of the database. +datasources.section.destination-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-tidb.password.description=Password associated with the username. +datasources.section.destination-tidb.port.description=Port of the database. 
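The jdbc_url_params entries above (destination-snowflake, destination-tidb) expect 'key=value' pairs joined by '&'. A minimal sketch of producing such a string and appending it to a JDBC URL; the helper name and the example endpoint are illustrative only, not part of any connector spec:

```
// Minimal sketch (not from the connector specs): render jdbc_url_params as
// 'key=value' pairs joined by '&' and append them to a JDBC URL.
object JdbcUrlParams {
  def render(params: Seq[(String, String)]): String =
    params.map { case (k, v) => s"$k=$v" }.mkString("&")

  def main(args: Array[String]): Unit = {
    val params = Seq("key1" -> "value1", "key2" -> "value2", "key3" -> "value3")
    val base   = "jdbc:mysql://localhost:4000/mydb" // hypothetical TiDB endpoint
    println(s"$base?${render(params)}")             // ...?key1=value1&key2=value2&key3=value3
  }
}
```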
+datasources.section.destination-tidb.ssl.description=Encrypt data using SSL. +datasources.section.destination-tidb.username.description=Username to use to access the database. +datasources.section.source-airtable.api_key.title=API Key +datasources.section.source-airtable.base_id.title=Base ID +datasources.section.source-airtable.tables.title=Tables +datasources.section.source-airtable.api_key.description=The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key. +datasources.section.source-airtable.base_id.description=The Base ID to integrate the data from. You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs. +datasources.section.source-airtable.tables.description=The list of Tables to integrate. +datasources.section.source-amazon-ads.auth_type.title=Auth Type +datasources.section.source-amazon-ads.client_id.title=Client ID +datasources.section.source-amazon-ads.client_secret.title=Client Secret +datasources.section.source-amazon-ads.profiles.title=Profile IDs (Optional) +datasources.section.source-amazon-ads.refresh_token.title=Refresh Token +datasources.section.source-amazon-ads.region.title=Region * +datasources.section.source-amazon-ads.report_generation_max_retries.title=Report Generation Maximum Retries * +datasources.section.source-amazon-ads.report_wait_timeout.title=Report Wait Timeout * +datasources.section.source-amazon-ads.start_date.title=Start Date (Optional) +datasources.section.source-amazon-ads.client_id.description=The client ID of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.client_secret.description=The client secret of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.profiles.description=Profile IDs you want to fetch data for. See docs for more details. +datasources.section.source-amazon-ads.refresh_token.description=Amazon Ads refresh token. See the docs for more information on how to obtain this token. +datasources.section.source-amazon-ads.region.description=Region to pull data from (EU/NA/FE). See docs for more details. +datasources.section.source-amazon-ads.report_generation_max_retries.description=Maximum retries Airbyte will attempt for fetching report data. Default is 5. +datasources.section.source-amazon-ads.report_wait_timeout.description=Timeout duration in minutes for Reports. Default is 30 minutes. +datasources.section.source-amazon-ads.start_date.description=The Start date for collecting reports, should not be more than 60 days in the past. 
In YYYY-MM-DD format +datasources.section.source-amazon-seller-partner.app_id.title=App Id * +datasources.section.source-amazon-seller-partner.auth_type.title=Auth Type +datasources.section.source-amazon-seller-partner.aws_access_key.title=AWS Access Key +datasources.section.source-amazon-seller-partner.aws_environment.title=AWSEnvironment +datasources.section.source-amazon-seller-partner.aws_secret_key.title=AWS Secret Access Key +datasources.section.source-amazon-seller-partner.lwa_app_id.title=LWA Client Id +datasources.section.source-amazon-seller-partner.lwa_client_secret.title=LWA Client Secret +datasources.section.source-amazon-seller-partner.max_wait_seconds.title=Max wait time for reports (in seconds) +datasources.section.source-amazon-seller-partner.period_in_days.title=Period In Days +datasources.section.source-amazon-seller-partner.refresh_token.title=Refresh Token +datasources.section.source-amazon-seller-partner.region.title=AWSRegion +datasources.section.source-amazon-seller-partner.replication_end_date.title=End Date +datasources.section.source-amazon-seller-partner.replication_start_date.title=Start Date +datasources.section.source-amazon-seller-partner.report_options.title=Report Options +datasources.section.source-amazon-seller-partner.role_arn.title=Role ARN +datasources.section.source-amazon-seller-partner.app_id.description=Your Amazon App ID +datasources.section.source-amazon-seller-partner.aws_access_key.description=Specifies the AWS access key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.aws_environment.description=An enumeration. +datasources.section.source-amazon-seller-partner.aws_secret_key.description=Specifies the AWS secret key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.lwa_app_id.description=Your Login with Amazon Client ID. +datasources.section.source-amazon-seller-partner.lwa_client_secret.description=Your Login with Amazon Client Secret. +datasources.section.source-amazon-seller-partner.max_wait_seconds.description=Sometimes report can take up to 30 minutes to generate. This will set the limit for how long to wait for a successful report. +datasources.section.source-amazon-seller-partner.period_in_days.description=Will be used for stream slicing for initial full_refresh sync when no updated state is present for reports that support sliced incremental sync. +datasources.section.source-amazon-seller-partner.refresh_token.description=The Refresh Token obtained via OAuth flow authorization. +datasources.section.source-amazon-seller-partner.region.description=An enumeration. +datasources.section.source-amazon-seller-partner.replication_end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. +datasources.section.source-amazon-seller-partner.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-amazon-seller-partner.report_options.description=Additional information passed to reports. This varies by report type. Must be a valid json string. +datasources.section.source-amazon-seller-partner.role_arn.description=Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations requested using this profile. (Needs permission to 'Assume Role' STS). 
+datasources.section.source-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.source-amazon-sqs.attributes_to_return.title=Message Attributes To Return +datasources.section.source-amazon-sqs.delete_messages.title=Delete Messages After Read +datasources.section.source-amazon-sqs.max_batch_size.title=Max Batch Size +datasources.section.source-amazon-sqs.max_wait_time.title=Max Wait Time +datasources.section.source-amazon-sqs.queue_url.title=Queue URL +datasources.section.source-amazon-sqs.region.title=AWS Region +datasources.section.source-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.source-amazon-sqs.visibility_timeout.title=Message Visibility Timeout +datasources.section.source-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.attributes_to_return.description=Comma-separated list of Message Attribute names to return +datasources.section.source-amazon-sqs.delete_messages.description=If Enabled, messages will be deleted from the SQS Queue after being read. If Disabled, messages are left in the queue and can be read more than once. WARNING: Enabling this option can result in data loss in cases of failure; use with caution and see the documentation for more detail. +datasources.section.source-amazon-sqs.max_batch_size.description=Maximum number of messages to get in one batch (10 max) +datasources.section.source-amazon-sqs.max_wait_time.description=Maximum amount of time in seconds to wait for messages in a single poll (20 max) +datasources.section.source-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.source-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.source-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.visibility_timeout.description=Modify the Visibility Timeout of the individual message from the Queue's default (seconds). +datasources.section.source-amplitude.api_key.title=API Key +datasources.section.source-amplitude.secret_key.title=Secret Key +datasources.section.source-amplitude.start_date.title=Replication Start Date +datasources.section.source-amplitude.api_key.description=Amplitude API Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.secret_key.description=Amplitude Secret Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-apify-dataset.clean.title=Clean +datasources.section.source-apify-dataset.datasetId.title=Dataset ID +datasources.section.source-apify-dataset.clean.description=If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false. +datasources.section.source-apify-dataset.datasetId.description=ID of the dataset you would like to load into Airbyte. +datasources.section.source-appsflyer.api_token.description=Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard. +datasources.section.source-appsflyer.app_id.description=App identifier as found in AppsFlyer.
+datasources.section.source-appsflyer.start_date.description=The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days. +datasources.section.source-appsflyer.timezone.description=Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console. +datasources.section.source-appstore-singer.issuer_id.title=Issuer ID +datasources.section.source-appstore-singer.key_id.title=Key ID +datasources.section.source-appstore-singer.private_key.title=Private Key +datasources.section.source-appstore-singer.start_date.title=Start Date +datasources.section.source-appstore-singer.vendor.title=Vendor ID +datasources.section.source-appstore-singer.issuer_id.description=Appstore Issuer ID. See the docs for more information on how to obtain this ID. +datasources.section.source-appstore-singer.key_id.description=Appstore Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.private_key.description=Appstore Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-appstore-singer.vendor.description=Appstore Vendor ID. See the docs for more information on how to obtain this ID. +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.title=Personal Access Token +datasources.section.source-asana.credentials.oneOf.0.title=Authenticate with Personal Access Token +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title= +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title= +datasources.section.source-asana.credentials.oneOf.1.title=Authenticate via Asana (OAuth) +datasources.section.source-asana.credentials.title=Authentication mechanism +datasources.section.source-asana.credentials.description=Choose how to authenticate to Asana +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.description=PAT Credentials +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.description=Asana Personal Access Token (generate yours here).
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.description=OAuth Credentials +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description= +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description= +datasources.section.source-aws-cloudtrail.aws_key_id.title=Key ID +datasources.section.source-aws-cloudtrail.aws_region_name.title=Region Name +datasources.section.source-aws-cloudtrail.aws_secret_key.title=Secret Key +datasources.section.source-aws-cloudtrail.start_date.title=Start Date +datasources.section.source-aws-cloudtrail.aws_key_id.description=AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.aws_region_name.description=The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name. +datasources.section.source-aws-cloudtrail.aws_secret_key.description=AWS CloudTrail Secret Key. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.start_date.description=The date from which you would like to replicate data. Data in AWS CloudTrail is available for the last 90 days only. Format: YYYY-MM-DD. +datasources.section.source-azure-table.storage_access_key.title=Access Key +datasources.section.source-azure-table.storage_account_name.title=Account Name +datasources.section.source-azure-table.storage_endpoint_suffix.title=Endpoint Suffix +datasources.section.source-azure-table.storage_access_key.description=Azure Table Storage Access Key. See the docs for more information on how to obtain this key. +datasources.section.source-azure-table.storage_account_name.description=The name of your storage account. +datasources.section.source-azure-table.storage_endpoint_suffix.description=Azure Table Storage service account URL suffix. See the docs for more information on how to obtain the endpoint suffix. +datasources.section.source-bamboo-hr.api_key.description=API key of BambooHR. +datasources.section.source-bamboo-hr.custom_reports_fields.description=Comma-separated list of fields to include in custom reports. +datasources.section.source-bamboo-hr.custom_reports_include_default_fields.description=If true, the custom reports endpoint will include the default fields defined here: https://documentation.bamboohr.com/docs/list-of-field-names. +datasources.section.source-bamboo-hr.subdomain.description=Subdomain of BambooHR. +datasources.section.source-bigcommerce.access_token.title=Access Token +datasources.section.source-bigcommerce.start_date.title=Start Date +datasources.section.source-bigcommerce.store_hash.title=Store Hash +datasources.section.source-bigcommerce.access_token.description=Access Token for making authenticated requests. +datasources.section.source-bigcommerce.start_date.description=The date from which you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-bigcommerce.store_hash.description=The hash code of the store. For https://api.bigcommerce.com/stores/HASH_CODE/v3/, the store's hash code is 'HASH_CODE'.
+datasources.section.source-bigquery.credentials_json.title=Credentials JSON +datasources.section.source-bigquery.dataset_id.title=Default Dataset ID +datasources.section.source-bigquery.project_id.title=Project ID +datasources.section.source-bigquery.credentials_json.description=The contents of your Service Account Key JSON file. See the docs for more information on how to obtain this key. +datasources.section.source-bigquery.dataset_id.description=The dataset ID to search for tables and views. If you are only loading data from one dataset, setting this option could result in much faster schema discovery. +datasources.section.source-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. +datasources.section.source-bing-ads.client_id.title=Client ID +datasources.section.source-bing-ads.client_secret.title=Client Secret +datasources.section.source-bing-ads.developer_token.title=Developer Token +datasources.section.source-bing-ads.refresh_token.title=Refresh Token +datasources.section.source-bing-ads.reports_start_date.title=Reports replication start date +datasources.section.source-bing-ads.tenant_id.title=Tenant ID +datasources.section.source-bing-ads.client_id.description=The Client ID of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.client_secret.description=The Client Secret of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.developer_token.description=Developer token associated with user. See more info in the docs. +datasources.section.source-bing-ads.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-bing-ads.reports_start_date.description=The start date from which to begin replicating report data. Any data generated before this date will not be replicated in reports. This is a UTC date in YYYY-MM-DD format. +datasources.section.source-bing-ads.tenant_id.description=The Tenant ID of your Microsoft Advertising developer application. Set this to "common" unless you know you need a different value. +datasources.section.source-braintree.environment.title=Environment +datasources.section.source-braintree.merchant_id.title=Merchant ID +datasources.section.source-braintree.private_key.title=Private Key +datasources.section.source-braintree.public_key.title=Public Key +datasources.section.source-braintree.start_date.title=Start Date +datasources.section.source-braintree.environment.description=Environment specifies where the data will come from. +datasources.section.source-braintree.merchant_id.description=The unique identifier for your entire gateway account. See the docs for more information on how to obtain this ID. +datasources.section.source-braintree.private_key.description=Braintree Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.public_key.description=Braintree Public Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-cart.access_token.title=Access Token +datasources.section.source-cart.start_date.title=Start Date +datasources.section.source-cart.store_name.title=Store Name +datasources.section.source-cart.access_token.description=Access Token for making authenticated requests. 
+datasources.section.source-cart.start_date.description=The date from which you'd like to replicate the data +datasources.section.source-cart.store_name.description=The name of Cart.com Online Store. All API URLs start with https://[mystorename.com]/api/v1/, where [mystorename.com] is the domain name of your store. +datasources.section.source-chargebee.product_catalog.title=Product Catalog +datasources.section.source-chargebee.site.title=Site +datasources.section.source-chargebee.site_api_key.title=API Key +datasources.section.source-chargebee.start_date.title=Start Date +datasources.section.source-chargebee.product_catalog.description=Product Catalog version of your Chargebee site. Instructions on how to find your version you may find here under `API Version` section. +datasources.section.source-chargebee.site.description=The site prefix for your Chargebee instance. +datasources.section.source-chargebee.site_api_key.description=Chargebee API Key. See the docs for more information on how to obtain this key. +datasources.section.source-chargebee.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-chargify.api_key.description=Chargify API Key. +datasources.section.source-chargify.domain.description=Chargify domain. Normally this domain follows the following format companyname.chargify.com +datasources.section.source-chartmogul.api_key.description=Chartmogul API key +datasources.section.source-chartmogul.interval.description=Some APIs such as Metrics require intervals to cluster data. +datasources.section.source-chartmogul.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. When feasible, any data before this date will not be replicated. +datasources.section.source-clickhouse.database.title=Database +datasources.section.source-clickhouse.host.title=Host +datasources.section.source-clickhouse.password.title=Password +datasources.section.source-clickhouse.port.title=Port +datasources.section.source-clickhouse.ssl.title=SSL Connection +datasources.section.source-clickhouse.username.title=Username +datasources.section.source-clickhouse.database.description=The name of the database. +datasources.section.source-clickhouse.host.description=The host endpoint of the Clickhouse cluster. +datasources.section.source-clickhouse.password.description=The password associated with this username. +datasources.section.source-clickhouse.port.description=The port of the database. +datasources.section.source-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.source-clickhouse.username.description=The username which is used to access the database. +datasources.section.source-close-com.api_key.description=Close.com API key (usually starts with 'api_'; find yours here). +datasources.section.source-close-com.start_date.description=The start date to sync data. Leave blank for full sync. Format: YYYY-MM-DD. +datasources.section.source-cockroachdb.database.title=DB Name +datasources.section.source-cockroachdb.host.title=Host +datasources.section.source-cockroachdb.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-cockroachdb.password.title=Password +datasources.section.source-cockroachdb.port.title=Port +datasources.section.source-cockroachdb.ssl.title=Connect using SSL +datasources.section.source-cockroachdb.username.title=User +datasources.section.source-cockroachdb.database.description=Name of the database. 
+datasources.section.source-cockroachdb.host.description=Hostname of the database. +datasources.section.source-cockroachdb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (e.g. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-cockroachdb.password.description=Password associated with the username. +datasources.section.source-cockroachdb.port.description=Port of the database. +datasources.section.source-cockroachdb.ssl.description=Encrypt client/server communications for increased security. +datasources.section.source-cockroachdb.username.description=Username to use to access the database. +datasources.section.source-commercetools.client_id.description=ID of the API Client. +datasources.section.source-commercetools.client_secret.description=The password or secret of the API Client. +datasources.section.source-commercetools.host.description=The cloud provider your shop is hosted on. See: https://docs.commercetools.com/api/authorization +datasources.section.source-commercetools.project_key.description=The project key. +datasources.section.source-commercetools.region.description=The region of the platform. +datasources.section.source-commercetools.start_date.description=The date from which you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-confluence.api_token.description=Please follow the Atlassian guide for generating an API token: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/ +datasources.section.source-confluence.domain_name.description=Your Confluence domain name +datasources.section.source-confluence.email.description=Your Confluence login email +datasources.section.source-db2.encryption.oneOf.0.title=Unencrypted +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.title=Key Store Password. This field is optional. If you do not fill in this field, the password will be randomly generated. +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.title=SSL PEM file +datasources.section.source-db2.encryption.oneOf.1.title=TLS Encrypted (verify certificate) +datasources.section.source-db2.encryption.title=Encryption +datasources.section.source-db2.jdbc_url_params.title=JDBC URL Params +datasources.section.source-db2.db.description=Name of the database. +datasources.section.source-db2.encryption.description=Encryption method to use when communicating with the database +datasources.section.source-db2.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-db2.encryption.oneOf.1.description=Verify and use the cert provided by the server. +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.description=Key Store Password +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations +datasources.section.source-db2.host.description=Host of the Db2 database. +datasources.section.source-db2.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-db2.password.description=Password associated with the username.
+datasources.section.source-db2.port.description=Port of the database. +datasources.section.source-db2.username.description=Username to use to access the database. +datasources.section.source-delighted.api_key.title=Delighted API Key +datasources.section.source-delighted.since.title=Since +datasources.section.source-delighted.api_key.description=A Delighted API key. +datasources.section.source-delighted.since.description=The date from which you'd like to replicate the data +datasources.section.source-dixa.api_token.description=Dixa API token +datasources.section.source-dixa.batch_size.description=Number of days to batch into one request. Max 31. +datasources.section.source-dixa.start_date.description=The connector pulls records updated from this date onwards. +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-drift.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.1.title=Access Token +datasources.section.source-drift.credentials.title=Authorization Method +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.description=The Client ID of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.description=Drift Access Token. See the docs for more information on how to generate this key. 
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.source-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.source-elasticsearch.endpoint.title=Server Endpoint +datasources.section.source-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.description=Use a api key and secret combination to authenticate +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to used when accessing an enterprise Elasticsearch instance. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.source-elasticsearch.endpoint.description=The full url of the Elasticsearch server +datasources.section.source-facebook-marketing.access_token.title=Access Token +datasources.section.source-facebook-marketing.account_id.title=Account ID +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.title=ValidActionBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.title=Action Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.title=ValidBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.title=Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.title=End Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.title=ValidEnums +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.title=Fields +datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.title=Custom Insights Lookback Window +datasources.section.source-facebook-marketing.custom_insights.items.properties.name.title=Name +datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.title=Start Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.title=Time Increment 
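The source-elasticsearch entries above mention a "Basic auth header with a username and password". A hedged sketch of how such a header is conventionally built (Base64 of "username:password"); the credentials below are placeholders, not configuration from the spec:

```
import java.nio.charset.StandardCharsets
import java.util.Base64

// Sketch of the "Basic auth header" mentioned for source-elasticsearch:
// the header value is "Basic " + base64("username:password").
object BasicAuthHeader {
  def apply(username: String, password: String): String = {
    val token = Base64.getEncoder.encodeToString(
      s"$username:$password".getBytes(StandardCharsets.UTF_8))
    s"Basic $token"
  }

  def main(args: Array[String]): Unit =
    println(apply("elastic", "changeme")) // placeholder credentials; value for the Authorization header
}
```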
+datasources.section.source-facebook-marketing.custom_insights.items.title=InsightConfig +datasources.section.source-facebook-marketing.custom_insights.title=Custom Insights +datasources.section.source-facebook-marketing.end_date.title=End Date +datasources.section.source-facebook-marketing.fetch_thumbnail_images.title=Fetch Thumbnail Images +datasources.section.source-facebook-marketing.include_deleted.title=Include Deleted +datasources.section.source-facebook-marketing.insights_lookback_window.title=Insights Lookback Window +datasources.section.source-facebook-marketing.max_batch_size.title=Maximum size of Batched Requests +datasources.section.source-facebook-marketing.page_size.title=Page Size of Requests +datasources.section.source-facebook-marketing.start_date.title=Start Date +datasources.section.source-facebook-marketing.access_token.description=The value of the access token generated. See the docs for more information +datasources.section.source-facebook-marketing.account_id.description=The Facebook Ad account ID to use when pulling data from the Facebook Marketing API. +datasources.section.source-facebook-marketing.custom_insights.description=A list which contains insights entries, each entry must have a name and can contains fields, breakdowns or action_breakdowns) +datasources.section.source-facebook-marketing.custom_insights.items.description=Config for custom insights +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.description=A list of chosen action_breakdowns for action_breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.description=A list of chosen breakdowns for breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.description=The date until which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. All data generated between the start date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.description=A list of chosen fields for fields parameter +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.custom_insights.items.properties.name.description=The name value of insight +datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.description=The date from which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.description=Time window in days by which to aggregate statistics. The sync will be chunked into N day intervals, where N is the number of days you specified. For example, if you set this value to 7, then all statistics will be reported as 7-day aggregates by starting from the start_date. 
If the start and end dates are October 1st and October 30th, then the connector will output 5 records: 01 - 06, 07 - 13, 14 - 20, 21 - 27, and 28 - 30 (3 days only). +datasources.section.source-facebook-marketing.end_date.description=The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.fetch_thumbnail_images.description=In each Ad Creative, fetch the thumbnail_url and store the result in thumbnail_data_url +datasources.section.source-facebook-marketing.include_deleted.description=Include data from deleted Campaigns, Ads, and AdSets +datasources.section.source-facebook-marketing.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.max_batch_size.description=Maximum batch size used when sending batch requests to Facebook API. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.page_size.description=Page size used when sending requests to Facebook API to specify number of records per page when response has pagination. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.start_date.description=The date from which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-facebook-pages.access_token.title=Page Access Token +datasources.section.source-facebook-pages.page_id.title=Page ID +datasources.section.source-facebook-pages.access_token.description=Facebook Page Access Token +datasources.section.source-facebook-pages.page_id.description=Page ID +datasources.section.source-faker.count.title=Count +datasources.section.source-faker.records_per_slice.title=Records Per Stream Slice +datasources.section.source-faker.records_per_sync.title=Records Per Sync +datasources.section.source-faker.seed.title=Seed +datasources.section.source-faker.count.description=How many users should be generated in total. This setting does not apply to the purchases or products stream. +datasources.section.source-faker.records_per_slice.description=How many fake records will be in each page (stream slice), before a state message is emitted? +datasources.section.source-faker.records_per_sync.description=How many fake records will be returned for each sync, for each stream? By default, it will take 2 syncs to create the requested 1000 records. 
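The time_increment description above explains that the sync window is split into N-day chunks. A small, illustrative sketch of that splitting; the exact boundary convention is the connector's own, this only shows the idea:

```
import java.time.LocalDate

// Illustrative sketch of the time_increment chunking described above:
// split [start, end] into N-day windows; the last window may be shorter.
object TimeIncrementChunks {
  def chunks(start: LocalDate, end: LocalDate, days: Int): Seq[(LocalDate, LocalDate)] =
    Iterator
      .iterate(start)(_.plusDays(days.toLong))
      .takeWhile(!_.isAfter(end))
      .map { s =>
        val e = s.plusDays(days.toLong - 1)
        s -> (if (e.isAfter(end)) end else e)
      }
      .toSeq

  def main(args: Array[String]): Unit =
    // 30 days of October with time_increment = 7 -> 5 windows, the last one shorter.
    chunks(LocalDate.parse("2022-10-01"), LocalDate.parse("2022-10-30"), 7)
      .foreach { case (s, e) => println(s"$s .. $e") }
}
```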
+datasources.section.source-faker.seed.description=Manually control the faker random seed to return the same values on subsequent runs (leave -1 for random) +datasources.section.source-file.dataset_name.title=Dataset Name +datasources.section.source-file.format.title=File Format +datasources.section.source-file.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.4.properties.user.title=User +datasources.section.source-file.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.5.properties.user.title=User +datasources.section.source-file.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.6.properties.user.title=User +datasources.section.source-file.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file.provider.oneOf.7.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.7.title=Local Filesystem (limited) +datasources.section.source-file.provider.title=Storage Provider +datasources.section.source-file.reader_options.title=Reader Options +datasources.section.source-file.url.title=URL +datasources.section.source-file.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details. 
+datasources.section.source-file.provider.oneOf.4.properties.host.description= +datasources.section.source-file.provider.oneOf.4.properties.host.description= +datasources.section.source-file.provider.oneOf.4.properties.password.description= +datasources.section.source-file.provider.oneOf.4.properties.password.description= +datasources.section.source-file.provider.oneOf.4.properties.port.description= +datasources.section.source-file.provider.oneOf.4.properties.port.description= +datasources.section.source-file.provider.oneOf.4.properties.user.description= +datasources.section.source-file.provider.oneOf.4.properties.user.description= +datasources.section.source-file.provider.oneOf.5.properties.host.description= +datasources.section.source-file.provider.oneOf.5.properties.host.description= +datasources.section.source-file.provider.oneOf.5.properties.password.description= +datasources.section.source-file.provider.oneOf.5.properties.password.description= +datasources.section.source-file.provider.oneOf.5.properties.port.description= +datasources.section.source-file.provider.oneOf.5.properties.port.description= +datasources.section.source-file.provider.oneOf.5.properties.user.description= +datasources.section.source-file.provider.oneOf.5.properties.user.description= +datasources.section.source-file.provider.oneOf.6.properties.host.description= +datasources.section.source-file.provider.oneOf.6.properties.host.description= +datasources.section.source-file.provider.oneOf.6.properties.password.description= +datasources.section.source-file.provider.oneOf.6.properties.password.description= +datasources.section.source-file.provider.oneOf.6.properties.port.description= +datasources.section.source-file.provider.oneOf.6.properties.port.description= +datasources.section.source-file.provider.oneOf.6.properties.user.description= +datasources.section.source-file.provider.oneOf.6.properties.user.description= +datasources.section.source-file.provider.oneOf.7.properties.storage.description=WARNING: Note that the local storage URL available for reading must start with the local mount "/local/" at the moment until we implement more advanced docker mounting options. +datasources.section.source-file.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior. +datasources.section.source-file.url.description=The URL path to access the file which should be replicated. 
+datasources.section.source-file-secure.dataset_name.title=Dataset Name +datasources.section.source-file-secure.format.title=File Format +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file-secure.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file-secure.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file-secure.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file-secure.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file-secure.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file-secure.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.5.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file-secure.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.6.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file-secure.provider.title=Storage Provider +datasources.section.source-file-secure.reader_options.title=Reader Options +datasources.section.source-file-secure.url.title=URL +datasources.section.source-file-secure.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file-secure.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file-secure.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details. 
+datasources.section.source-file-secure.provider.oneOf.4.properties.host.description=
+datasources.section.source-file-secure.provider.oneOf.4.properties.password.description=
+datasources.section.source-file-secure.provider.oneOf.4.properties.port.description=
+datasources.section.source-file-secure.provider.oneOf.4.properties.user.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.host.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.password.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.port.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.user.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.host.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.password.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.port.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.user.description=
+datasources.section.source-file-secure.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior.
+datasources.section.source-file-secure.url.description=The URL path to access the file which should be replicated.
+datasources.section.source-firebolt.account.title=Account
+datasources.section.source-firebolt.database.title=Database
+datasources.section.source-firebolt.engine.title=Engine
+datasources.section.source-firebolt.host.title=Host
+datasources.section.source-firebolt.password.title=Password
+datasources.section.source-firebolt.username.title=Username
+datasources.section.source-firebolt.account.description=Firebolt account to login.
+datasources.section.source-firebolt.database.description=The database to connect to.
+datasources.section.source-firebolt.engine.description=Engine name or url to connect to.
+datasources.section.source-firebolt.host.description=The host name of your Firebolt database.
+datasources.section.source-firebolt.password.description=Firebolt password.
+datasources.section.source-firebolt.username.description=Firebolt email address you use to login.
+datasources.section.source-flexport.api_key.title=API Key
+datasources.section.source-flexport.start_date.title=Start Date
+datasources.section.source-freshcaller.api_key.title=API Key
+datasources.section.source-freshcaller.domain.title=Domain for Freshcaller account
+datasources.section.source-freshcaller.requests_per_minute.title=Requests per minute
+datasources.section.source-freshcaller.start_date.title=Start Date
+datasources.section.source-freshcaller.sync_lag_minutes.title=Lag in minutes for each sync
+datasources.section.source-freshcaller.api_key.description=Freshcaller API Key. See the docs for more information on how to obtain this key.
+datasources.section.source-freshcaller.domain.description=Used to construct the Base URL for the Freshcaller APIs
+datasources.section.source-freshcaller.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account.
+datasources.section.source-freshcaller.start_date.description=UTC date and time. Any data created after this date will be replicated.
+datasources.section.source-freshcaller.sync_lag_minutes.description=Lag in minutes for each sync, i.e., at time T, data for the time range [prev_sync_time, T-30] will be fetched
+datasources.section.source-freshdesk.api_key.title=API Key
+datasources.section.source-freshdesk.domain.title=Domain
+datasources.section.source-freshdesk.requests_per_minute.title=Requests per minute
+datasources.section.source-freshdesk.start_date.title=Start Date
+datasources.section.source-freshdesk.api_key.description=Freshdesk API Key. See the docs for more information on how to obtain this key.
+datasources.section.source-freshdesk.domain.description=Freshdesk domain
+datasources.section.source-freshdesk.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account.
+datasources.section.source-freshdesk.start_date.description=UTC date and time. Any data created after this date will be replicated. If this parameter is not set, all data will be replicated.
+datasources.section.source-freshsales.api_key.title=API Key
+datasources.section.source-freshsales.domain_name.title=Domain Name
+datasources.section.source-freshsales.api_key.description=Freshsales API Key. See here. The key is case sensitive.
+datasources.section.source-freshsales.domain_name.description=The Name of your Freshsales domain
+datasources.section.source-freshservice.api_key.title=API Key
+datasources.section.source-freshservice.domain_name.title=Domain Name
+datasources.section.source-freshservice.start_date.title=Start Date
+datasources.section.source-freshservice.api_key.description=Freshservice API Key. See here. The key is case sensitive.
+datasources.section.source-freshservice.domain_name.description=The name of your Freshservice domain
+datasources.section.source-freshservice.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated.
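+# Illustrative example (assumed, not taken from a spec): the Freshdesk/Freshcaller/Freshservice
+# domain values are typically the account subdomain, e.g. mycompany.freshdesk.com or
+# mycompany.freshcaller.com; see each connector's docs to confirm the expected form.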
+datasources.section.source-github.branch.title=Branch (Optional)
+datasources.section.source-github.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-github.credentials.oneOf.0.title=OAuth
+datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.title=Personal Access Tokens
+datasources.section.source-github.credentials.oneOf.1.title=Personal Access Token
+datasources.section.source-github.credentials.title=Authentication *
+datasources.section.source-github.page_size_for_large_streams.title=Page size for large streams (Optional)
+datasources.section.source-github.repository.title=GitHub Repositories
+datasources.section.source-github.start_date.title=Start date
+datasources.section.source-github.branch.description=Space-delimited list of GitHub repository branches to pull commits for, e.g. `airbytehq/airbyte/master`. If no branches are specified for a repository, the default branch will be pulled.
+datasources.section.source-github.credentials.description=Choose how to authenticate to GitHub
+datasources.section.source-github.credentials.oneOf.0.properties.access_token.description=OAuth access token
+datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.description=Log into GitHub and then generate a personal access token. To load balance your API quota consumption across multiple API tokens, input multiple tokens separated with ","
+datasources.section.source-github.page_size_for_large_streams.description=The GitHub connector contains several streams with a large amount of data. The page size of such streams depends on the size of your repository. We recommend that you specify values between 10 and 30.
+datasources.section.source-github.repository.description=Space-delimited list of GitHub organizations/repositories, e.g. `airbytehq/airbyte` for a single repository, `airbytehq/*` to get all repositories from an organization and `airbytehq/airbyte airbytehq/another-repo` for multiple repositories.
+datasources.section.source-github.start_date.description=The date from which you'd like to replicate data from GitHub in the format YYYY-MM-DDT00:00:00Z. For the streams which support this configuration, only data generated on or after the start date will be replicated. This field doesn't apply to all streams, see the docs for more info
+datasources.section.source-gitlab.api_url.title=API URL
+datasources.section.source-gitlab.groups.title=Groups
+datasources.section.source-gitlab.private_token.title=Private Token
+datasources.section.source-gitlab.projects.title=Projects
+datasources.section.source-gitlab.start_date.title=Start Date
+datasources.section.source-gitlab.api_url.description=Please enter the base URL of your GitLab instance.
+datasources.section.source-gitlab.groups.description=Space-delimited list of groups. e.g. airbyte.io.
+datasources.section.source-gitlab.private_token.description=Log into your GitLab account and then generate a personal Access Token.
+datasources.section.source-gitlab.projects.description=Space-delimited list of projects. e.g. airbyte.io/documentation meltano/tap-gitlab.
+datasources.section.source-gitlab.start_date.description=The date from which you'd like to replicate data for GitLab API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.
+datasources.section.source-google-ads.conversion_window_days.title=Conversion Window (Optional)
+datasources.section.source-google-ads.credentials.properties.access_token.title=Access Token (Optional)
+datasources.section.source-google-ads.credentials.properties.client_id.title=Client ID
+datasources.section.source-google-ads.credentials.properties.client_secret.title=Client Secret
+datasources.section.source-google-ads.credentials.properties.developer_token.title=Developer Token
+datasources.section.source-google-ads.credentials.properties.refresh_token.title=Refresh Token
+datasources.section.source-google-ads.credentials.title=Google Credentials
+datasources.section.source-google-ads.custom_queries.items.properties.query.title=Custom Query
+datasources.section.source-google-ads.custom_queries.items.properties.table_name.title=Destination Table Name
+datasources.section.source-google-ads.custom_queries.title=Custom GAQL Queries (Optional)
+datasources.section.source-google-ads.customer_id.title=Customer ID(s)
+datasources.section.source-google-ads.end_date.title=End Date (Optional)
+datasources.section.source-google-ads.login_customer_id.title=Login Customer ID for Managed Accounts (Optional)
+datasources.section.source-google-ads.start_date.title=Start Date
+datasources.section.source-google-ads.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. For more information, see Google's documentation.
+datasources.section.source-google-ads.credentials.description=
+datasources.section.source-google-ads.credentials.properties.access_token.description=Access Token for making authenticated requests. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.credentials.properties.client_id.description=The Client ID of your Google Ads developer application. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.credentials.properties.client_secret.description=The Client Secret of your Google Ads developer application. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.credentials.properties.developer_token.description=Developer token granted by Google to use their APIs. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.credentials.properties.refresh_token.description=The token for obtaining a new access token. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.custom_queries.description=
+datasources.section.source-google-ads.custom_queries.items.properties.query.description=A custom defined GAQL query for building the report. Should not contain segments.date expression because it is used by incremental streams. See Google's query builder for more information.
+datasources.section.source-google-ads.custom_queries.items.properties.table_name.description=The table name in your destination database for the chosen query.
+datasources.section.source-google-ads.customer_id.description=Comma separated list of (client) customer IDs. Each customer ID must be specified as a 10-digit number without dashes. More instruction on how to find this value in our docs. 
Metrics streams like AdGroupAdReport cannot be requested for a manager account. +datasources.section.source-google-ads.end_date.description=UTC date and time in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-google-ads.login_customer_id.description=If your access to the customer account is through a manager account, this field is required and must be set to the customer ID of the manager account (10-digit number without dashes). More information about this field you can see here +datasources.section.source-google-ads.start_date.description=UTC date and time in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.title=Authenticate via Google (Oauth) +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-analytics-data-api.credentials.title=Credentials +datasources.section.source-google-analytics-data-api.custom_reports.title=Custom Reports (Optional) +datasources.section.source-google-analytics-data-api.date_ranges_start_date.title=Date Range Start Date +datasources.section.source-google-analytics-data-api.property_id.title=Property ID +datasources.section.source-google-analytics-data-api.window_in_days.title=Data request time increment in days (Optional) +datasources.section.source-google-analytics-data-api.credentials.description=Credentials for the service +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization +datasources.section.source-google-analytics-data-api.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field. +datasources.section.source-google-analytics-data-api.date_ranges_start_date.description=The start date. One of the values Ndaysago, yesterday, today or in the format YYYY-MM-DD +datasources.section.source-google-analytics-data-api.property_id.description=A Google Analytics GA4 property identifier whose events are tracked. 
Specified in the URL path and not the body +datasources.section.source-google-analytics-data-api.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-analytics-v4.credentials.oneOf.0.title=Authenticate via Google (Oauth) +datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key +datasources.section.source-google-analytics-v4.credentials.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-analytics-v4.credentials.title=Credentials +datasources.section.source-google-analytics-v4.custom_reports.title=Custom Reports (Optional) +datasources.section.source-google-analytics-v4.start_date.title=Replication Start Date +datasources.section.source-google-analytics-v4.view_id.title=View ID +datasources.section.source-google-analytics-v4.window_in_days.title=Data request time increment in days (Optional) +datasources.section.source-google-analytics-v4.credentials.description=Credentials for the service +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization +datasources.section.source-google-analytics-v4.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field. +datasources.section.source-google-analytics-v4.start_date.description=The date in the format YYYY-MM-DD. Any data before this date will not be replicated. +datasources.section.source-google-analytics-v4.view_id.description=The ID for the Google Analytics View you want to fetch data from. This can be found from the Google Analytics Account Explorer. +datasources.section.source-google-analytics-v4.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. 
More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-google-directory.credentials.oneOf.0.title=Sign in via Google (OAuth)
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.title=Credentials JSON
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.source-google-directory.credentials.oneOf.1.properties.email.title=Email
+datasources.section.source-google-directory.credentials.oneOf.1.title=Service Account Key
+datasources.section.source-google-directory.credentials.title=Google Credentials
+datasources.section.source-google-directory.credentials.description=Google APIs use the OAuth 2.0 protocol for authentication and authorization. The Source supports Web server application and Service accounts scenarios.
+datasources.section.source-google-directory.credentials.oneOf.0.description=For this scenario, the user only needs to give permission to read Google Directory data.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.description=The Client ID of the developer application.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the developer application.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.description=Authentication Scenario
+datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.description=The Token for obtaining a new access token.
+datasources.section.source-google-directory.credentials.oneOf.1.description=For this scenario, the user should obtain the service account's credentials from the Google API Console and provide a delegated email.
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key.
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.description=Authentication Scenario
+datasources.section.source-google-directory.credentials.oneOf.1.properties.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs.
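+# Illustrative examples (assumed, not taken from the generated specs):
+# - a source-google-ads custom query might look like
+#   SELECT campaign.id, campaign.name, metrics.impressions FROM campaign
+#   (remember to omit segments.date, as noted above);
+# - a source-google-analytics-v4 custom_reports value might look like
+#   [{"name": "sessions_report", "dimensions": ["ga:date"], "metrics": ["ga:sessions"]}].
+# Consult each connector's docs for the exact supported fields.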
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-google-search-console.authorization.oneOf.0.title=OAuth
+datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.title=Admin Email
+datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.title=Service Account JSON Key
+datasources.section.source-google-search-console.authorization.oneOf.1.title=Service Account Key Authentication
+datasources.section.source-google-search-console.authorization.title=Authentication Type
+datasources.section.source-google-search-console.end_date.title=End Date
+datasources.section.source-google-search-console.site_urls.title=Website URL Property
+datasources.section.source-google-search-console.start_date.title=Start Date
+datasources.section.source-google-search-console.authorization.description=
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.description=Access token for making authenticated requests. Read more here.
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.description=The client ID of your Google Search Console developer application. Read more here.
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.description=The client secret of your Google Search Console developer application. Read more here.
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. Read more here.
+datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs.
+datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.description=The JSON key of the service account to use for authorization. Read more here.
+datasources.section.source-google-search-console.end_date.description=UTC date in the format 2017-01-25. Any data after this date will not be replicated. Must be greater than or equal to the start date field.
+datasources.section.source-google-search-console.site_urls.description=The URLs of the website property attached to your GSC account. Read more here.
+datasources.section.source-google-search-console.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated.
+datasources.section.source-google-workspace-admin-reports.credentials_json.title=Credentials JSON
+datasources.section.source-google-workspace-admin-reports.email.title=Email
+datasources.section.source-google-workspace-admin-reports.lookback.title=Lookback Window in Days
+datasources.section.source-google-workspace-admin-reports.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key.
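+# Illustrative example (assumed): site_urls for source-google-search-console can reference either a
+# URL-prefix property such as https://www.example.com/ or a domain property such as
+# sc-domain:example.com; check the connector docs for the accepted formats.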
+datasources.section.source-google-workspace-admin-reports.email.description=The email of the user, who has permissions to access the Google Workspace Admin APIs. +datasources.section.source-google-workspace-admin-reports.lookback.description=Sets the range of time shown in the report. The maximum value allowed by the Google API is 180 days. +datasources.section.source-greenhouse.api_key.title=API Key +datasources.section.source-greenhouse.api_key.description=Greenhouse API Key. See the docs for more information on how to generate this key. +datasources.section.source-harvest.account_id.title=Account ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-harvest.credentials.oneOf.0.title=Authenticate via Harvest (OAuth) +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.title=Personal Access Token +datasources.section.source-harvest.credentials.oneOf.1.title=Authenticate with Personal Access Token +datasources.section.source-harvest.credentials.title=Authentication mechanism +datasources.section.source-harvest.replication_start_date.title=Start Date +datasources.section.source-harvest.account_id.description=Harvest account ID. Required for all Harvest requests in pair with Personal Access Token +datasources.section.source-harvest.credentials.description=Choose how to authenticate to Harvest. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.description=The Client ID of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.description=Log into Harvest and then create new personal access token. +datasources.section.source-harvest.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-hellobaton.api_key.description=authentication key required to access the api endpoints +datasources.section.source-hellobaton.company.description=Company name that generates your base api url +datasources.section.source-hubplanner.api_key.description=Hubplanner API key. See https://github.com/hubplanner/API#authentication for more details. +datasources.section.source-instagram.access_token.title=Access Token +datasources.section.source-instagram.start_date.title=Start Date +datasources.section.source-instagram.access_token.description=The value of the access token generated. See the docs for more information +datasources.section.source-instagram.start_date.description=The date from which you'd like to replicate data for User Insights, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-intercom.access_token.title=Access token +datasources.section.source-intercom.start_date.title=Start date +datasources.section.source-intercom.access_token.description=Access token for making authenticated requests. See the Intercom docs for more information. 
+datasources.section.source-intercom.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-iterable.api_key.title=API Key +datasources.section.source-iterable.start_date.title=Start Date +datasources.section.source-iterable.api_key.description=Iterable API Key. See the docs for more information on how to obtain this key. +datasources.section.source-iterable.start_date.description=The date from which you'd like to replicate data for Iterable, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-jdbc.jdbc_url.title=JDBC URL +datasources.section.source-jdbc.password.title=Password +datasources.section.source-jdbc.username.title=Username +datasources.section.source-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.source-jdbc.password.description=The password associated with this username. +datasources.section.source-jdbc.username.description=The username which is used to access the database. +datasources.section.source-jira.additional_fields.title=Additional Fields +datasources.section.source-jira.api_token.title=API Token +datasources.section.source-jira.domain.title=Domain +datasources.section.source-jira.email.title=Email +datasources.section.source-jira.enable_experimental_streams.title=Enable Experimental Streams +datasources.section.source-jira.expand_issue_changelog.title=Expand Issue Changelog +datasources.section.source-jira.projects.title=Projects +datasources.section.source-jira.render_fields.title=Render Issue Fields +datasources.section.source-jira.start_date.title=Start Date +datasources.section.source-jira.additional_fields.description=List of additional fields to include in replicating issues. +datasources.section.source-jira.api_token.description=Jira API Token. See the docs for more information on how to generate this key. +datasources.section.source-jira.domain.description=The Domain for your Jira account, e.g. airbyteio.atlassian.net +datasources.section.source-jira.email.description=The user email for your Jira account. +datasources.section.source-jira.enable_experimental_streams.description=Allow the use of experimental streams which rely on undocumented Jira API endpoints. See https://docs.airbyte.io/integrations/sources/jira#experimental-tables for more info. +datasources.section.source-jira.expand_issue_changelog.description=Expand the changelog when replicating issues. +datasources.section.source-jira.projects.description=List of Jira project keys to replicate data for. +datasources.section.source-jira.render_fields.description=Render issue fields in HTML format in addition to Jira JSON-like format. +datasources.section.source-jira.start_date.description=The date from which you'd like to replicate data for Jira in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only in the following incremental streams: issues. 
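+# Illustrative example (assumed): source-jira projects is a list of project keys such as PROJ1, PROJ2,
+# and additional_fields can list extra issue fields (e.g. customfield_10000) to include when
+# replicating issues; see the Jira connector docs for the exact expected values.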
+datasources.section.source-kafka.auto_commit_interval_ms.title=Auto Commit Interval, ms +datasources.section.source-kafka.auto_offset_reset.title=Auto Offset Reset +datasources.section.source-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.source-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.source-kafka.client_id.title=Client ID +datasources.section.source-kafka.enable_auto_commit.title=Enable Auto Commit +datasources.section.source-kafka.group_id.title=Group ID +datasources.section.source-kafka.max_poll_records.title=Max Poll Records +datasources.section.source-kafka.polling_time.title=Polling Time +datasources.section.source-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.source-kafka.protocol.title=Protocol +datasources.section.source-kafka.receive_buffer_bytes.title=Receive Buffer, bytes +datasources.section.source-kafka.repeated_calls.title=Repeated Calls +datasources.section.source-kafka.request_timeout_ms.title=Request Timeout, ms +datasources.section.source-kafka.retry_backoff_ms.title=Retry Backoff, ms +datasources.section.source-kafka.subscription.oneOf.0.properties.topic_partitions.title=List of topic:partition Pairs +datasources.section.source-kafka.subscription.oneOf.0.title=Manually assign a list of partitions +datasources.section.source-kafka.subscription.oneOf.1.properties.topic_pattern.title=Topic Pattern +datasources.section.source-kafka.subscription.oneOf.1.title=Subscribe to all topics matching specified pattern +datasources.section.source-kafka.subscription.title=Subscription Method +datasources.section.source-kafka.test_topic.title=Test Topic +datasources.section.source-kafka.auto_commit_interval_ms.description=The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true. +datasources.section.source-kafka.auto_offset_reset.description=What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer. +datasources.section.source-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background. +datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups. +datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note, that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and returns them incrementally from each poll. +datasources.section.source-kafka.polling_time.description=Amount of time Kafka connector should try to poll for messages. +datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received. +datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios. 
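+# Illustrative example (assumed): a sasl_jaas_config value for SASL/PLAIN typically looks like
+# org.apache.kafka.common.security.plain.PlainLoginModule required username="alice" password="alice-secret";
+# with the login module matching the chosen SASL mechanism (e.g. ScramLoginModule for SCRAM).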
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching a specified pattern to get dynamically assigned partitions.
+datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).
+datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read.
+datasources.section.source-kafka.test_topic.description=The topic used to test whether Airbyte can consume messages.
+datasources.section.source-klaviyo.api_key.title=API Key
+datasources.section.source-klaviyo.start_date.title=Start Date
+datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key.
+datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-kustomer-singer.api_token.title=API Token
+datasources.section.source-kustomer-singer.start_date.title=Start Date
+datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this
+datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data
+datasources.section.source-kyriba.domain.title=Domain
+datasources.section.source-kyriba.end_date.title=End Date
+datasources.section.source-kyriba.password.title=Password
+datasources.section.source-kyriba.start_date.title=Start Date
+datasources.section.source-kyriba.username.title=Username
+datasources.section.source-kyriba.domain.description=Kyriba domain
+datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run to the current date.
+datasources.section.source-kyriba.password.description=Password to be used in basic auth
+datasources.section.source-kyriba.start_date.description=The date the sync should start from.
+datasources.section.source-kyriba.username.description=Username to be used in basic auth
+datasources.section.source-lemlist.api_key.title=API key
+datasources.section.source-lemlist.api_key.description=Lemlist API key.
+datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional)
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-ads.credentials.title=Authentication *
+datasources.section.source-linkedin-ads.start_date.title=Start date
+datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs separated by a space, to pull the data from. Leave empty if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info. 
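+# Illustrative example (assumed): for source-kafka, a manual assignment might list topic:partition
+# pairs such as sample.topic:0, sample.topic:1, while a topic_pattern subscription could use a regex
+# such as sample\..* to match topics dynamically; see the Kafka connector docs for the exact syntax.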
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours. +datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token +datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token +datasources.section.source-linkedin-pages.credentials.title=Authentication * +datasources.section.source-linkedin-pages.org_id.title=Organization ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID +datasources.section.source-linnworks.application_id.title=Application ID. +datasources.section.source-linnworks.application_secret.title=Application Secret +datasources.section.source-linnworks.start_date.title=Start Date +datasources.section.source-linnworks.token.title=API Token +datasources.section.source-linnworks.application_id.description=Linnworks Application ID +datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret +datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-looker.client_id.title=Client ID +datasources.section.source-looker.client_secret.title=Client Secret +datasources.section.source-looker.domain.title=Domain +datasources.section.source-looker.run_look_ids.title=Look IDs to Run +datasources.section.source-looker.client_id.description=The Client ID is first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key. +datasources.section.source-looker.client_secret.description=The Client Secret is second part of an API3 key. 
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com,looker.[clientname].com,IP address +datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional) +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key +datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key +datasources.section.source-mailchimp.credentials.title=Authentication * +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key. +datasources.section.source-mailgun.domain_region.title=Domain Region Code +datasources.section.source-mailgun.private_key.title=Private API Key +datasources.section.source-mailgun.start_date.title=Replication Start Date +datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'. +datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data. +datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago. +datasources.section.source-marketo.client_id.title=Client ID +datasources.section.source-marketo.client_secret.title=Client Secret +datasources.section.source-marketo.domain_url.title=Domain URL +datasources.section.source-marketo.start_date.title=Start Date +datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this. +datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0)
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft
+datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism
+datasources.section.source-microsoft-teams.period.title=Period
+datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180. 
+datasources.section.source-mixpanel.api_secret.title=Project Secret
+datasources.section.source-mixpanel.attribution_window.title=Attribution Window
+datasources.section.source-mixpanel.date_window_size.title=Date slicing window
+datasources.section.source-mixpanel.end_date.title=End Date
+datasources.section.source-mixpanel.project_timezone.title=Project Timezone
+datasources.section.source-mixpanel.region.title=Region
+datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default
+datasources.section.source-mixpanel.start_date.title=Start Date
+datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this.
+datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days.
+datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days used to slice through data. You can reduce it if the amount of data in each window is too big for your environment.
+datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date.
+datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.
+datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU.
+datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.
+datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default.
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional)
+datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token
+datasources.section.source-monday.credentials.oneOf.1.title=API Token
+datasources.section.source-monday.credentials.title=Authorization Method
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. 
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests. +datasources.section.source-mongodb.auth_source.title=Authentication source +datasources.section.source-mongodb.database.title=Database name +datasources.section.source-mongodb.host.title=Host +datasources.section.source-mongodb.password.title=Password +datasources.section.source-mongodb.port.title=Port +datasources.section.source-mongodb.replica_set.title=Replica Set +datasources.section.source-mongodb.ssl.title=TLS connection +datasources.section.source-mongodb.user.title=User +datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info. +datasources.section.source-mongodb.database.description=Database to be replicated. +datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated. +datasources.section.source-mongodb.password.description=Password +datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated. +datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info. +datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB. +datasources.section.source-mongodb.user.description=User +datasources.section.source-mongodb-v2.auth_source.title=Authentication Source +datasources.section.source-mongodb-v2.database.title=Database Name +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses +datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type +datasources.section.source-mongodb-v2.password.title=Password +datasources.section.source-mongodb-v2.user.title=User +datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored. +datasources.section.source-mongodb-v2.database.description=The database you want to replicate. +datasources.section.source-mongodb-v2.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database. 
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by comma. +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to. +datasources.section.source-mongodb-v2.password.description=The password associated with this username. +datasources.section.source-mongodb-v2.user.description=The username which is used to access the database. +datasources.section.source-mssql.database.title=Database +datasources.section.source-mssql.host.title=Host +datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mssql.password.title=Password +datasources.section.source-mssql.port.title=Port +datasources.section.source-mssql.replication_method.oneOf.0.title=Standard +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level +datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mssql.replication_method.title=Replication Method +datasources.section.source-mssql.schemas.title=Schemas +datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.source-mssql.ssl_method.title=SSL Method +datasources.section.source-mssql.username.title=Username +datasources.section.source-mssql.database.description=The name of the database. +datasources.section.source-mssql.host.description=The hostname of the database. +datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mssql.password.description=The password associated with the username. +datasources.section.source-mssql.port.description=The port of the database. +datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
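The source-mongodb-v2 "Server Addresses" field above expects `host`:`port` pairs separated by commas. A purely illustrative value; the hostnames and ports below are placeholders, not taken from this file:

```
mongo1.example.com:27017,mongo2.example.com:27017,mongo3.example.com:27017
```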
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC. +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database. +datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database. +datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.source-mssql.username.description=The username which is used to access the database. 
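Several connectors (source-mssql here; source-mysql, source-oracle, source-postgres and source-redshift further on) accept a "JDBC URL Params" value formatted as 'key=value' pairs joined by '&'. A hedged illustration only; the parameter names depend on the JDBC driver in use and are shown purely as an example:

```
# illustrative only - substitute properties supported by your driver
loginTimeout=30&applicationName=myApp
```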
+datasources.section.source-my-hours.email.title=Email +datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size +datasources.section.source-my-hours.password.title=Password +datasources.section.source-my-hours.start_date.title=Start Date +datasources.section.source-my-hours.email.description=Your My Hours username +datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days +datasources.section.source-my-hours.password.description=The password associated with the username +datasources.section.source-my-hours.start_date.description=Start date for collecting time logs +datasources.section.source-mysql.database.title=Database +datasources.section.source-mysql.host.title=Host +datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mysql.password.title=Password +datasources.section.source-mysql.port.title=Port +datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD +datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mysql.replication_method.title=Replication Method +datasources.section.source-mysql.ssl.title=SSL Connection +datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred +datasources.section.source-mysql.ssl_mode.oneOf.1.title=required +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity +datasources.section.source-mysql.ssl_mode.title=SSL modes +datasources.section.source-mysql.username.title=Username +datasources.section.source-mysql.database.description=The database name. +datasources.section.source-mysql.host.description=The host name of the database. +datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mysql.password.description=The password associated with the username. +datasources.section.source-mysql.port.description=The port to connect to. +datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database. +datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
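The source-mysql CDC option above relies on the server's binary log being available. A minimal sketch, assuming shell access to a hypothetical source host, of how one might check the relevant server variables before enabling CDC:

```
# sketch only - host and user are placeholders
mysql -h mysql.example.com -u admin -p -e "SHOW VARIABLES LIKE 'log_bin';"
mysql -h mysql.example.com -u admin -p -e "SHOW VARIABLES LIKE 'binlog_format';"  # ROW is typically expected for CDC
```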
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL. +datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
  • preferred - Automatically attempt SSL connection. If the MySQL server does not support SSL, continue with a regular connection.
  • required - Always connect with SSL. If the MySQL server doesn’t support SSL, the connection will not be established. Certificate Authority (CA) and Hostname are not verified.
  • verify-ca - Always connect with SSL. Verifies CA, but allows connection even if Hostname does not match.
  • Verify Identity - Always connect with SSL. Verify both CA and Hostname.
  • Read more in the docs. +datasources.section.source-mysql.ssl_mode.oneOf.0.description=Preferred SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.1.description=Require SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.description=Verify CA SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.source-mysql.ssl_mode.oneOf.3.description=Verify-full SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.source-mysql.username.description=The username which is used to access the database. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-notion.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-notion.credentials.oneOf.1.properties.token.title=Access Token +datasources.section.source-notion.credentials.oneOf.1.title=Access Token +datasources.section.source-notion.credentials.title=Authenticate using +datasources.section.source-notion.start_date.title=Start Date +datasources.section.source-notion.credentials.description=Pick an authentication method. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.description=The Access Token is a token you receive by completing the OAuth web flow of Notion. +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.description=The ClientID of your Notion integration. +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.description=The ClientSecret of your Notion integration. +datasources.section.source-notion.credentials.oneOf.1.properties.token.description=Notion API access token, see the docs for more information on how to obtain this token. +datasources.section.source-notion.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-okta.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-okta.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-okta.credentials.oneOf.1.title=API Token +datasources.section.source-okta.credentials.title=Authorization Method * +datasources.section.source-okta.domain.title=Okta domain +datasources.section.source-okta.start_date.title=Start Date +datasources.section.source-okta.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain new Access Token, when it's expired. +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.description=An Okta token. See the docs for instructions on how to generate it. +datasources.section.source-okta.domain.description=The Okta domain. See the docs for instructions on how to find it. +datasources.section.source-okta.start_date.description=UTC date and time in the format YYYY-MM-DDTHH:MM:SSZ. Any data before this date will not be replicated. +datasources.section.source-onesignal.outcome_names.title=Outcome Names +datasources.section.source-onesignal.start_date.title=Start Date +datasources.section.source-onesignal.user_auth_key.title=User Auth Key +datasources.section.source-onesignal.outcome_names.description=Comma-separated list of names and the value (sum/count) for the returned outcome data. See the docs for more details +datasources.section.source-onesignal.start_date.description=The date from which you'd like to replicate data for OneSignal API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-onesignal.user_auth_key.description=OneSignal User Auth Key, see the docs for more information on how to obtain this key. +datasources.section.source-openweather.appid.title=App ID +datasources.section.source-openweather.lang.title=Language +datasources.section.source-openweather.lat.title=Latitude +datasources.section.source-openweather.lon.title=Longitude +datasources.section.source-openweather.units.title=Units +datasources.section.source-openweather.appid.description=Your OpenWeather API Key. See here. The key is case sensitive. +datasources.section.source-openweather.lang.description=You can use the lang parameter to get the output in your language. The contents of the description field will be translated. See here for the list of supported languages. +datasources.section.source-openweather.lat.description=Latitude for which you want to get the weather conditions (min -90, max 90). +datasources.section.source-openweather.lon.description=Longitude for which you want to get the weather conditions (min -180, max 180). +datasources.section.source-openweather.units.description=Units of measurement. standard, metric and imperial units are available. If you do not use the units parameter, standard units will be applied by default. 
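The source-openweather fields above (appid, lat, lon, lang, units) parameterize a straightforward HTTP request. A rough sketch only; the endpoint path is an assumption and the coordinates and API key are placeholders:

```
# sketch only - endpoint path assumed, values are placeholders
curl "https://api.openweathermap.org/data/2.5/onecall?lat=52.52&lon=13.41&units=metric&lang=en&appid=$OPENWEATHER_API_KEY"
```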
+datasources.section.source-oracle.connection_data.oneOf.0.properties.service_name.title=Service name +datasources.section.source-oracle.connection_data.oneOf.0.title=Service name +datasources.section.source-oracle.connection_data.oneOf.1.properties.sid.title=System ID (SID) +datasources.section.source-oracle.connection_data.oneOf.1.title=System ID (SID) +datasources.section.source-oracle.connection_data.title=Connect by +datasources.section.source-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.source-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM File +datasources.section.source-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.source-oracle.encryption.title=Encryption +datasources.section.source-oracle.host.title=Host +datasources.section.source-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.source-oracle.password.title=Password +datasources.section.source-oracle.port.title=Port +datasources.section.source-oracle.schemas.title=Schemas +datasources.section.source-oracle.username.title=User +datasources.section.source-oracle.connection_data.description=Connection data that will be used for the DB connection +datasources.section.source-oracle.connection_data.oneOf.0.description=Use service name +datasources.section.source-oracle.connection_data.oneOf.1.description=Use SID (Oracle System Identifier) +datasources.section.source-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.source-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines what encryption algorithm is used. +datasources.section.source-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.source-oracle.host.description=Hostname of the database. +datasources.section.source-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-oracle.password.description=The password associated with the username. +datasources.section.source-oracle.port.description=Port of the database. +datasources.section.source-oracle.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-oracle.username.description=The username which is used to access the database. 
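The source-oracle "Connect by" options above distinguish connecting by service name from connecting by SID. For illustration only, the two common JDBC URL shapes; host, port and names are placeholders:

```
# illustrative only
jdbc:oracle:thin:@//db.example.com:1521/ORCLPDB1   # by service name
jdbc:oracle:thin:@db.example.com:1521:ORCL         # by SID
```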
+datasources.section.source-orb.api_key.title=Orb API Key +datasources.section.source-orb.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-orb.numeric_event_properties_keys.title=Event properties keys (numeric values) +datasources.section.source-orb.start_date.title=Start Date +datasources.section.source-orb.string_event_properties_keys.title=Event properties keys (string values) +datasources.section.source-orb.api_key.description=Orb API Key, issued from the Orb admin console. +datasources.section.source-orb.lookback_window_days.description=When set to N, the connector will always refresh resources created within the past N days. By default, updated objects that are not newly created are not incrementally synced. +datasources.section.source-orb.numeric_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-orb.start_date.description=UTC date and time in the format 2022-03-01T00:00:00Z. Any data with created_at before this date will not be synced. +datasources.section.source-orb.string_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-outreach.client_id.title=Client ID +datasources.section.source-outreach.client_secret.title=Client Secret +datasources.section.source-outreach.redirect_uri.title=Redirect URI +datasources.section.source-outreach.refresh_token.title=Refresh Token +datasources.section.source-outreach.start_date.title=Start Date +datasources.section.source-outreach.client_id.description=The Client ID of your Outreach developer application. +datasources.section.source-outreach.client_secret.description=The Client Secret of your Outreach developer application. +datasources.section.source-outreach.redirect_uri.description=A Redirect URI is the location where the authorization server sends the user once the app has been successfully authorized and granted an authorization code or access token. +datasources.section.source-outreach.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-outreach.start_date.description=The date from which you'd like to replicate data for Outreach API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-pardot.client_id.description=The Consumer Key that can be found when viewing your app in Salesforce +datasources.section.source-pardot.client_secret.description=The Consumer Secret that can be found when viewing your app in Salesforce +datasources.section.source-pardot.is_sandbox.description=Whether or not the app is in a Salesforce sandbox. If you do not know what this is, assume it is false. +datasources.section.source-pardot.pardot_business_unit_id.description=Pardot Business ID, can be found at Setup > Pardot > Pardot Account Setup +datasources.section.source-pardot.refresh_token.description=Salesforce Refresh Token used for Airbyte to access your Salesforce account. If you don't know what this is, follow this guide to retrieve it. +datasources.section.source-pardot.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
Leave blank to skip this filter +datasources.section.source-paypal-transaction.client_id.title=Client ID +datasources.section.source-paypal-transaction.client_secret.title=Client secret +datasources.section.source-paypal-transaction.is_sandbox.title=Sandbox +datasources.section.source-paypal-transaction.refresh_token.title=Refresh token (Optional) +datasources.section.source-paypal-transaction.start_date.title=Start Date +datasources.section.source-paypal-transaction.client_id.description=The Client ID of your Paypal developer application. +datasources.section.source-paypal-transaction.client_secret.description=The Client Secret of your Paypal developer application. +datasources.section.source-paypal-transaction.is_sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-paypal-transaction.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-paypal-transaction.start_date.description=Start Date for data extraction in ISO format. The date must be within the range from 3 years ago to 12 hours before the present time. +datasources.section.source-paystack.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-paystack.secret_key.title=Secret Key +datasources.section.source-paystack.start_date.title=Start Date +datasources.section.source-paystack.lookback_window_days.description=When set, the connector will always reload data from the past N days, where N is the value set here. This is useful if your data is updated after creation. +datasources.section.source-paystack.secret_key.description=The Paystack API key (usually starts with 'sk_live_'; find yours here). +datasources.section.source-paystack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-persistiq.api_key.description=PersistIq API Key. See the docs for more information on where to find that key. +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pinterest.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-pinterest.credentials.oneOf.1.title=Access Token +datasources.section.source-pinterest.credentials.title=Authorization Method +datasources.section.source-pinterest.start_date.title=Start Date +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain new Access Token, when it's expired. +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests. +datasources.section.source-pinterest.start_date.description=A date in the format YYYY-MM-DD. If you have not set a date, it will default to 2020-07-28. 
+datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pipedrive.authorization.oneOf.0.title=Sign in via Pipedrive (OAuth) +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.title=API Token +datasources.section.source-pipedrive.authorization.oneOf.1.title=API Key Authentication +datasources.section.source-pipedrive.authorization.title=Authentication Type +datasources.section.source-pipedrive.replication_start_date.title=Start Date +datasources.section.source-pipedrive.authorization.description=Choose one of the possible authorization methods +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.description=The Client ID of your Pipedrive developer application. +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.description=The Client Secret of your Pipedrive developer application +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.description=The Pipedrive API Token. +datasources.section.source-pipedrive.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. When specified and not None, the stream will behave as incremental +datasources.section.source-pivotal-tracker.api_token.description=Pivotal Tracker API token +datasources.section.source-plaid.access_token.title=Access Token +datasources.section.source-plaid.api_key.title=API Key +datasources.section.source-plaid.client_id.title=Client ID +datasources.section.source-plaid.plaid_env.title=Plaid Environment +datasources.section.source-plaid.start_date.title=Start Date +datasources.section.source-plaid.access_token.description=The end-user's Link access token. +datasources.section.source-plaid.api_key.description=The Plaid API key to use to hit the API. +datasources.section.source-plaid.client_id.description=The Plaid client id +datasources.section.source-plaid.plaid_env.description=The Plaid environment +datasources.section.source-plaid.start_date.description=The date from which you'd like to replicate data for Plaid in the format YYYY-MM-DD. All data generated after this date will be replicated. +datasources.section.source-pokeapi.pokemon_name.title=Pokemon Name +datasources.section.source-pokeapi.pokemon_name.description=Pokemon requested from the API. 
+datasources.section.source-postgres.database.title=Database Name +datasources.section.source-postgres.host.title=Host +datasources.section.source-postgres.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-postgres.password.title=Password +datasources.section.source-postgres.port.title=Port +datasources.section.source-postgres.replication_method.oneOf.0.title=Standard +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.title=Initial Waiting Time in Seconds (Advanced) +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.title=Plugin +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.title=Publication +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.title=Replication Slot +datasources.section.source-postgres.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-postgres.replication_method.title=Replication Method +datasources.section.source-postgres.schemas.title=Schemas +datasources.section.source-postgres.ssl.title=Connect using SSL +datasources.section.source-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.source-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.source-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.source-postgres.ssl_mode.oneOf.3.title=require +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA Certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.source-postgres.ssl_mode.title=SSL Modes +datasources.section.source-postgres.username.title=Username +datasources.section.source-postgres.database.description=Name of the database. +datasources.section.source-postgres.host.description=Hostname of the database. +datasources.section.source-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-postgres.password.description=Password associated with the username. +datasources.section.source-postgres.port.description=Port of the database. +datasources.section.source-postgres.replication_method.description=Replication method for extracting data from the database. +datasources.section.source-postgres.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. 
+datasources.section.source-postgres.replication_method.oneOf.1.description=Logical replication uses the Postgres write-ahead log (WAL) to detect inserts, updates, and deletes. This needs to be configured on the source database itself. Only available on Postgres 10 and above. Read the docs. +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.description=The amount of time the connector will wait when it launches to determine if there is new data to sync or not. Defaults to 300 seconds. Valid range: 120 seconds to 1200 seconds. Read about initial waiting time. +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.description=A logical decoding plugin installed on the PostgreSQL server. The `pgoutput` plugin is used by default. If the replication table contains a lot of big jsonb values it is recommended to use `wal2json` plugin. Read more about selecting replication plugins. +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.description=A Postgres publication used for consuming changes. Read about publications and replication identities. +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.description=A plugin logical replication slot. Read about replication slots. +datasources.section.source-postgres.schemas.description=The list of schemas (case sensitive) to sync from. Defaults to public. +datasources.section.source-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.source-postgres.ssl_mode.description=SSL connection modes. +datasources.section.source-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.source-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. If you do not add it - the password will be generated automatically. +datasources.section.source-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. If you do not add it - the password will be generated automatically. +datasources.section.source-postgres.username.description=Username to access the database. +datasources.section.source-posthog.api_key.title=API Key +datasources.section.source-posthog.base_url.title=Base URL +datasources.section.source-posthog.start_date.title=Start Date +datasources.section.source-posthog.api_key.description=API Key. 
See the docs for information on how to generate this key. +datasources.section.source-posthog.base_url.description=Base PostHog url. Defaults to PostHog Cloud (https://app.posthog.com). +datasources.section.source-posthog.start_date.description=The date from which you'd like to replicate the data. Any data before this date will not be replicated. +datasources.section.source-prestashop.access_key.description=Your PrestaShop access key. See the docs for info on how to obtain this. +datasources.section.source-prestashop.url.description=Shop URL without trailing slash (domain name or IP address) +datasources.section.source-qualaroo.key.title=API key +datasources.section.source-qualaroo.start_date.title=Start Date +datasources.section.source-qualaroo.survey_ids.title=Qualaroo survey IDs +datasources.section.source-qualaroo.token.title=API token +datasources.section.source-qualaroo.key.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-qualaroo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-qualaroo.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated. +datasources.section.source-qualaroo.token.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-quickbooks-singer.client_id.title=Client ID +datasources.section.source-quickbooks-singer.client_secret.title=Client Secret +datasources.section.source-quickbooks-singer.realm_id.title=Realm ID +datasources.section.source-quickbooks-singer.refresh_token.title=Refresh Token +datasources.section.source-quickbooks-singer.sandbox.title=Sandbox +datasources.section.source-quickbooks-singer.start_date.title=Start Date +datasources.section.source-quickbooks-singer.user_agent.title=User Agent +datasources.section.source-quickbooks-singer.client_id.description=Identifies which app is making the request. Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.client_secret.description= Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.realm_id.description=Labeled Company ID. The Make API Calls panel is populated with the realm id and the current access token. +datasources.section.source-quickbooks-singer.refresh_token.description=A token used when refreshing the access token. +datasources.section.source-quickbooks-singer.sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-quickbooks-singer.start_date.description=The default value to use if no bookmark exists for an endpoint (rfc3339 date string). E.g, 2021-03-20T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-quickbooks-singer.user_agent.description=Process and email for API logging purposes. Example: tap-quickbooks . +datasources.section.source-recharge.access_token.title=Access Token +datasources.section.source-recharge.start_date.title=Start Date +datasources.section.source-recharge.access_token.description=The value of the Access Token generated. See the docs for more information. 
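Referring back to the source-postgres Logical Replication (CDC) fields above (replication slot, publication, plugin): a minimal sketch, assuming a hypothetical database and user, of how the prerequisites might be created with psql; the slot and publication names are placeholders:

```
# sketch only - host, database, user and object names are placeholders
psql -h db.example.com -U replication_user -d mydb \
  -c "SELECT pg_create_logical_replication_slot('airbyte_slot', 'pgoutput');"
psql -h db.example.com -U replication_user -d mydb \
  -c "CREATE PUBLICATION airbyte_publication FOR ALL TABLES;"
```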
+datasources.section.source-recharge.start_date.description=The date from which you'd like to replicate data for Recharge API, in the format YYYY-MM-DDT00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-recurly.api_key.title=API Key +datasources.section.source-recurly.api_key.description=Recurly API Key. See the docs for more information on how to generate this key. +datasources.section.source-recurly.begin_time.description=ISO8601 timestamp from which the replication from the Recurly API will start. +datasources.section.source-recurly.end_time.description=ISO8601 timestamp at which the replication from the Recurly API will stop. Records after that date won't be imported. +datasources.section.source-redshift.database.title=Database +datasources.section.source-redshift.host.title=Host +datasources.section.source-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.source-redshift.password.title=Password +datasources.section.source-redshift.port.title=Port +datasources.section.source-redshift.schemas.title=Schemas +datasources.section.source-redshift.username.title=Username +datasources.section.source-redshift.database.description=Name of the database. +datasources.section.source-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com). +datasources.section.source-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-redshift.password.description=Password associated with the username. +datasources.section.source-redshift.port.description=Port of the database. +datasources.section.source-redshift.schemas.description=The list of schemas to sync from. Specify one or more explicitly or keep empty to process all schemas. Schema names are case sensitive. +datasources.section.source-redshift.username.description=Username to use to access the database. +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-retently.credentials.oneOf.0.title=Authenticate via Retently (OAuth) +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.title=API Token +datasources.section.source-retently.credentials.oneOf.1.title=Authenticate with API Token +datasources.section.source-retently.credentials.title=Authentication Mechanism +datasources.section.source-retently.credentials.description=Choose how to authenticate to Retently +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.description=The Client ID of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.description=Retently Refresh Token which can be used to fetch new Bearer Tokens when the current one expires. +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.description=Retently API Token. See the docs for more information on how to obtain this key. 
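The source-redshift "Host" description above asks for the full cluster endpoint. A purely illustrative value; the cluster ID and region are placeholders:

```
examplecluster.abc123xyz789.us-east-1.redshift.amazonaws.com
```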
+datasources.section.source-rki-covid.start_date.title=Start Date +datasources.section.source-rki-covid.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-s3.dataset.title=Output Stream Name +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.title=Additional Reader Options +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.title=Advanced Options +datasources.section.source-s3.format.oneOf.0.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.0.properties.delimiter.title=Delimiter +datasources.section.source-s3.format.oneOf.0.properties.double_quote.title=Double Quote +datasources.section.source-s3.format.oneOf.0.properties.encoding.title=Encoding +datasources.section.source-s3.format.oneOf.0.properties.escape_char.title=Escape Character +datasources.section.source-s3.format.oneOf.0.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.title=Infer Datatypes +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.0.properties.quote_char.title=Quote Character +datasources.section.source-s3.format.oneOf.0.title=CSV +datasources.section.source-s3.format.oneOf.1.properties.batch_size.title=Record batch size +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.title=Buffer Size +datasources.section.source-s3.format.oneOf.1.properties.columns.title=Selected Columns +datasources.section.source-s3.format.oneOf.1.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.1.title=Parquet +datasources.section.source-s3.format.oneOf.2.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.2.title=Avro +datasources.section.source-s3.format.oneOf.3.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.3.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.title=UnexpectedFieldBehaviorEnum +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.title=Unexpected field behavior +datasources.section.source-s3.format.oneOf.3.title=Jsonl +datasources.section.source-s3.format.title=File Format +datasources.section.source-s3.path_pattern.title=Pattern of files to replicate +datasources.section.source-s3.provider.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-s3.provider.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-s3.provider.properties.bucket.title=Bucket +datasources.section.source-s3.provider.properties.endpoint.title=Endpoint +datasources.section.source-s3.provider.properties.path_prefix.title=Path Prefix +datasources.section.source-s3.provider.properties.use_ssl.title=Use TLS +datasources.section.source-s3.provider.properties.verify_ssl_cert.title=Verify TLS Certificates +datasources.section.source-s3.provider.title=S3: Amazon Web Services +datasources.section.source-s3.schema.title=Manually enforced data schema (Optional) +datasources.section.source-s3.dataset.description=The name of the stream you would like this source to output. Can contain letters, numbers, or underscores. 
+datasources.section.source-s3.format.description=The format of the files you'd like to replicate +datasources.section.source-s3.format.oneOf.0.description=This connector utilises PyArrow (Apache Arrow) for CSV parsing. +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.description=Optionally add a valid JSON string here to provide additional options to the csv reader. Mappings must correspond to options detailed here. 'column_types' is used internally to handle schema so overriding that would likely cause problems. +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.description=Optionally add a valid JSON string here to provide additional Pyarrow ReadOptions. Specify 'column_names' here if your CSV doesn't have a header, or if you want to use custom column names. 'block_size' and 'encoding' are already used above; specifying them again here will override the values above. +datasources.section.source-s3.format.oneOf.0.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.0.properties.delimiter.description=The character delimiting individual cells in the CSV data. This may only be a 1-character string. For tab-delimited data enter '\t'. +datasources.section.source-s3.format.oneOf.0.properties.double_quote.description=Whether two quotes in a quoted CSV value denote a single quote in the data. +datasources.section.source-s3.format.oneOf.0.properties.encoding.description=The character encoding of the CSV data. Leave blank to default to UTF8. See list of python encodings for allowable options. +datasources.section.source-s3.format.oneOf.0.properties.escape_char.description=The character used for escaping special characters. To disallow escaping, leave this field blank. +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.description=Configures whether a schema for the source should be inferred from the current data or not. If set to false and a custom schema is set, then the manually enforced schema is used. If a schema is not manually set, and this is set to false, then all fields will be read as strings +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.description=Whether newline characters are allowed in CSV values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.0.properties.quote_char.description=The character used for quoting CSV values. To disallow quoting, leave this field blank. +datasources.section.source-s3.format.oneOf.1.description=This connector utilises PyArrow (Apache Arrow) for Parquet parsing. +datasources.section.source-s3.format.oneOf.1.properties.batch_size.description=Maximum number of records per batch read from the input files. Batches may be smaller if there aren’t enough rows in the file. This option can help avoid out-of-memory errors if your data is particularly wide. +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.description=Perform read buffering when deserializing individual column chunks. By default every group column will be loaded fully to memory. This option can help avoid out-of-memory errors if your data is particularly wide. 
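The source-s3 CSV options 'additional_reader_options' and 'advanced_options' above both take JSON strings. Hedged illustrations only; the keys shown are assumed to map to PyArrow read/convert options and the column names are placeholders:

```
# advanced_options - supply header names when the CSV has none
{"column_names": ["id", "name", "created_at"]}
# additional_reader_options - possible extra reader settings
{"strings_can_be_null": true, "null_values": ["NA", "NULL"]}
```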
+datasources.section.source-s3.format.oneOf.1.properties.columns.description=If you only want to sync a subset of the columns from the file(s), add the columns you want here as a comma-delimited list. Leave it empty to sync all columns. +datasources.section.source-s3.format.oneOf.2.description=This connector utilises fastavro for Avro parsing. +datasources.section.source-s3.format.oneOf.3.description=This connector uses PyArrow for JSON Lines (jsonl) file parsing. +datasources.section.source-s3.format.oneOf.3.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.description=Whether newline characters are allowed in JSON values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.description=An enumeration. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.description=How JSON fields outside of explicit_schema (if given) are treated. Check PyArrow documentation for details +datasources.section.source-s3.path_pattern.description=A regular expression which tells the connector which files to replicate. All files which match this pattern will be replicated. Use | to separate multiple patterns. See this page to understand pattern syntax (GLOBSTAR and SPLIT flags are enabled). Use pattern ** to pick up all files. +datasources.section.source-s3.provider.description=Use this to load files from S3 or S3-compatible services +datasources.section.source-s3.provider.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.bucket.description=Name of the S3 bucket where the file(s) exist. +datasources.section.source-s3.provider.properties.endpoint.description=Endpoint to an S3 compatible service. Leave empty to use AWS. +datasources.section.source-s3.provider.properties.path_prefix.description=By providing a path-like prefix (e.g. myFolder/thisTable/) under which all the relevant files sit, we can optimize finding these in S3. This is optional but recommended if your bucket contains many folders/files which you don't need to replicate. +datasources.section.source-s3.provider.properties.use_ssl.description=Whether the remote server is using a secure SSL/TLS connection. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.provider.properties.verify_ssl_cert.description=Set this to false to allow self signed certificates. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.schema.description=Optionally provide a schema to enforce, as a valid JSON string. Ensure this is a mapping of { "column" : "type" }, where types are valid JSON Schema datatypes. Leave as {} to auto-infer the schema. 
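For the source-s3 'path_pattern' and manually enforced 'schema' fields above, two purely illustrative values; the folder and column names are placeholders:

```
# path_pattern - every CSV under two prefixes, patterns separated by |
myFolder/**/*.csv|reports/*.csv
# schema - JSON mapping of column name to JSON Schema type
{"id": "integer", "name": "string", "created_at": "string"}
```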
+datasources.section.source-salesloft.client_id.title=Client ID
+datasources.section.source-salesloft.client_secret.title=Client Secret
+datasources.section.source-salesloft.refresh_token.title=Refresh Token
+datasources.section.source-salesloft.start_date.title=Start Date
+datasources.section.source-salesloft.client_id.description=The Client ID of your Salesloft developer application.
+datasources.section.source-salesloft.client_secret.description=The Client Secret of your Salesloft developer application.
+datasources.section.source-salesloft.refresh_token.description=The token for obtaining a new access token.
+datasources.section.source-salesloft.start_date.description=The date from which you'd like to replicate data for Salesloft API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.
+datasources.section.source-search-metrics.api_key.title=API Key
+datasources.section.source-search-metrics.client_secret.title=Client Secret
+datasources.section.source-search-metrics.country_code.title=Country Code
+datasources.section.source-search-metrics.start_date.title=Start Date
+datasources.section.source-search-metrics.api_key.description=
+datasources.section.source-search-metrics.client_secret.description=
+datasources.section.source-search-metrics.country_code.description=The region of the S3 staging bucket to use if utilising a copy strategy.
+datasources.section.source-search-metrics.start_date.description=Data generated in SearchMetrics after this date will be replicated. This date must be specified in the format YYYY-MM-DDT00:00:00Z.
+datasources.section.source-sendgrid.apikey.title=Sendgrid API key
+datasources.section.source-sendgrid.start_time.title=Start time
+datasources.section.source-sendgrid.apikey.description=API Key, use admin to generate this key.
+datasources.section.source-sendgrid.start_time.description=Start time in timestamp integer format. Any data before this timestamp will not be replicated.
+datasources.section.source-sentry.auth_token.title=Authentication Tokens
+datasources.section.source-sentry.hostname.title=Host Name
+datasources.section.source-sentry.organization.title=Organization
+datasources.section.source-sentry.project.title=Project
+datasources.section.source-sentry.auth_token.description=Log into Sentry and then create authentication tokens. For self-hosted, you can find or create authentication tokens by visiting "{instance_url_prefix}/settings/account/api/auth-tokens/"
+datasources.section.source-sentry.hostname.description=Host name of the Sentry API server. For self-hosted, specify your host name here. Otherwise, leave it empty.
+datasources.section.source-sentry.organization.description=The slug of the organization the groups belong to.
+datasources.section.source-sentry.project.description=The name (slug) of the Project you want to sync.
+datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.title=Password
+datasources.section.source-sftp.credentials.oneOf.0.title=Password Authentication
+datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.title=SSH Private Key
+datasources.section.source-sftp.credentials.oneOf.1.title=SSH Key Authentication
+datasources.section.source-sftp.credentials.title=Authentication *
+datasources.section.source-sftp.file_pattern.title=File Pattern (Optional)
+datasources.section.source-sftp.file_types.title=File types
+datasources.section.source-sftp.folder_path.title=Folder Path (Optional)
+datasources.section.source-sftp.host.title=Host Address
+datasources.section.source-sftp.port.title=Port
+datasources.section.source-sftp.user.title=User Name
+datasources.section.source-sftp.credentials.description=The server authentication method
+datasources.section.source-sftp.credentials.oneOf.0.properties.auth_method.description=Connect through password authentication
+datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.description=OS-level password for logging into the jump server host
+datasources.section.source-sftp.credentials.oneOf.1.properties.auth_method.description=Connect through SSH key
+datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.description=OS-level user account SSH key credentials in RSA PEM format (created with ssh-keygen -t rsa -m PEM -f myuser_rsa)
+datasources.section.source-sftp.file_pattern.description=The regular expression to specify files for sync in a chosen Folder Path
+datasources.section.source-sftp.file_types.description=Comma-separated file types. Currently only 'csv' and 'json' types are supported.
+datasources.section.source-sftp.folder_path.description=The directory to search files for sync
+datasources.section.source-sftp.host.description=The server host address
+datasources.section.source-sftp.port.description=The server port
+datasources.section.source-sftp.user.description=The server user
+datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-shopify.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.title=API Password
+datasources.section.source-shopify.credentials.oneOf.1.title=API Password
+datasources.section.source-shopify.credentials.title=Shopify Authorization Method
+datasources.section.source-shopify.shop.title=Shopify Store
+datasources.section.source-shopify.start_date.title=Replication Start Date
+datasources.section.source-shopify.credentials.description=The authorization method to use to retrieve data from Shopify
+datasources.section.source-shopify.credentials.oneOf.0.description=OAuth2.0
+datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.description=The Access Token for making authenticated requests.
+datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.description=The Client ID of the Shopify developer application.
+datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the Shopify developer application.
+datasources.section.source-shopify.credentials.oneOf.1.description=API Password Auth +datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.description=The API Password for your private application in the `Shopify` store. +datasources.section.source-shopify.shop.description=The name of your Shopify store found in the URL. For example, if your URL was https://NAME.myshopify.com, then the name would be 'NAME'. +datasources.section.source-shopify.start_date.description=The date you would like to replicate data from. Format: YYYY-MM-DD. Any data before this date will not be replicated. +datasources.section.source-shortio.domain_id.title=Domain ID +datasources.section.source-shortio.secret_key.title=Secret Key +datasources.section.source-shortio.start_date.title=Start Date +datasources.section.source-shortio.secret_key.description=Short.io Secret Key +datasources.section.source-shortio.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-slack.channel_filter.title=Channel name filter +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.title=Access token +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-slack.credentials.oneOf.0.title=Sign in via Slack (OAuth) +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-slack.credentials.oneOf.1.title=API Token +datasources.section.source-slack.credentials.title=Authentication mechanism +datasources.section.source-slack.join_channels.title=Join all channels +datasources.section.source-slack.lookback_window.title=Threads Lookback window (Days) +datasources.section.source-slack.start_date.title=Start Date +datasources.section.source-slack.channel_filter.description=A channel name list (without leading '#' char) which limit the channels from which you'd like to sync. Empty list means no filter. +datasources.section.source-slack.credentials.description=Choose how to authenticate into Slack +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.description=Slack access_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.description=Slack client_id. See our docs if you need help finding this id. +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.description=Slack client_secret. See our docs if you need help finding this secret. +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.description=Slack refresh_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.description=A Slack bot token. See the docs for instructions on how to generate it. +datasources.section.source-slack.join_channels.description=Whether to join all channels or to sync data only from channels the bot is already in. If false, you'll need to manually add the bot to all the channels from which you'd like to sync messages. +datasources.section.source-slack.lookback_window.description=How far into the past to look for messages in threads. 
+datasources.section.source-slack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-smartsheets.access_token.title=Access Token +datasources.section.source-smartsheets.spreadsheet_id.title=Sheet ID +datasources.section.source-smartsheets.start_datetime.title=Start Datetime (Optional) +datasources.section.source-smartsheets.access_token.description=The access token to use for accessing your data from Smartsheets. This access token must be generated by a user with at least read access to the data you'd like to replicate. Generate an access token in the Smartsheets main menu by clicking Account > Apps & Integrations > API Access. See the setup guide for information on how to obtain this token. +datasources.section.source-smartsheets.spreadsheet_id.description=The spreadsheet ID. Find it by opening the spreadsheet then navigating to File > Properties +datasources.section.source-smartsheets.start_datetime.description=Only rows modified after this date/time will be replicated. This should be an ISO 8601 string, for instance: `2000-01-01T13:00:00` +datasources.section.source-snapchat-marketing.client_id.title=Client ID +datasources.section.source-snapchat-marketing.client_secret.title=Client Secret +datasources.section.source-snapchat-marketing.end_date.title=End Date (Optional) +datasources.section.source-snapchat-marketing.refresh_token.title=Refresh Token +datasources.section.source-snapchat-marketing.start_date.title=Start Date +datasources.section.source-snapchat-marketing.client_id.description=The Client ID of your Snapchat developer application. +datasources.section.source-snapchat-marketing.client_secret.description=The Client Secret of your Snapchat developer application. +datasources.section.source-snapchat-marketing.end_date.description=Date in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-snapchat-marketing.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-snapchat-marketing.start_date.description=Date in the format 2022-01-01. Any data before this date will not be replicated. 
+datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.title=Password +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.title=Username +datasources.section.source-snowflake.credentials.oneOf.1.title=Username and Password +datasources.section.source-snowflake.credentials.title=Authorization Method +datasources.section.source-snowflake.database.title=Database +datasources.section.source-snowflake.host.title=Account Name +datasources.section.source-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.source-snowflake.role.title=Role +datasources.section.source-snowflake.schema.title=Schema +datasources.section.source-snowflake.warehouse.title=Warehouse +datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.description=The Client ID of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.description=Refresh Token for making authenticated requests. +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.description=The password associated with the username. +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.description=The username you created to allow Airbyte to access the database. +datasources.section.source-snowflake.database.description=The database you created for Airbyte to access data. +datasources.section.source-snowflake.host.description=The host domain of the snowflake instance (must include the account, region, cloud environment, and end with snowflakecomputing.com). +datasources.section.source-snowflake.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-snowflake.role.description=The role you created for Airbyte to access Snowflake. +datasources.section.source-snowflake.schema.description=The source Snowflake schema tables. +datasources.section.source-snowflake.warehouse.description=The warehouse you created for Airbyte to access data. 
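The source-snowflake `host` and `jdbc_url_params` descriptions above imply a particular URL shape. A small illustrative sketch of how those pieces would roughly combine into a JDBC URL; the account name, region, and parameter values are made up for the example:

```
# Hypothetical Snowflake connection values matching the field descriptions above.
HOST="myaccount.us-east-1.snowflakecomputing.com"       # must end with snowflakecomputing.com
JDBC_URL_PARAMS="queryTimeout=120&application=airbyte"  # 'key=value' pairs joined with '&'

# Roughly the JDBC URL the connector would assemble from these fields.
echo "jdbc:snowflake://${HOST}/?${JDBC_URL_PARAMS}"
```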
+datasources.section.source-square.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-square.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-square.credentials.oneOf.0.title=Oauth authentication +datasources.section.source-square.credentials.oneOf.1.properties.api_key.title=API key token +datasources.section.source-square.credentials.oneOf.1.title=API Key +datasources.section.source-square.credentials.title=Credential Type +datasources.section.source-square.include_deleted_objects.title=Include Deleted Objects +datasources.section.source-square.is_sandbox.title=Sandbox +datasources.section.source-square.start_date.title=Start Date +datasources.section.source-square.credentials.oneOf.0.properties.client_id.description=The Square-issued ID of your application +datasources.section.source-square.credentials.oneOf.0.properties.client_secret.description=The Square-issued application secret for your application +datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.description=A refresh token generated using the above client ID and secret +datasources.section.source-square.credentials.oneOf.1.properties.api_key.description=The API key for a Square application +datasources.section.source-square.include_deleted_objects.description=In some streams there is an option to include deleted objects (Items, Categories, Discounts, Taxes) +datasources.section.source-square.is_sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-square.start_date.description=UTC date in the format YYYY-MM-DD. Any data before this date will not be replicated. If not set, all data will be replicated. +datasources.section.source-strava.athlete_id.title=Athlete ID +datasources.section.source-strava.client_id.title=Client ID +datasources.section.source-strava.client_secret.title=Client Secret +datasources.section.source-strava.refresh_token.title=Refresh Token +datasources.section.source-strava.start_date.title=Start Date +datasources.section.source-strava.athlete_id.description=The Athlete ID of your Strava developer application. +datasources.section.source-strava.client_id.description=The Client ID of your Strava developer application. +datasources.section.source-strava.client_secret.description=The Client Secret of your Strava developer application. +datasources.section.source-strava.refresh_token.description=The Refresh Token with the activity: read_all permissions. +datasources.section.source-strava.start_date.description=UTC date and time. Any data before this date will not be replicated. +datasources.section.source-surveymonkey.access_token.title=Access Token +datasources.section.source-surveymonkey.start_date.title=Start Date +datasources.section.source-surveymonkey.survey_ids.title=Survey Monkey survey IDs +datasources.section.source-surveymonkey.access_token.description=Access Token for making authenticated requests. See the docs for information on how to generate this key. +datasources.section.source-surveymonkey.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-surveymonkey.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all boards to which you have access will be replicated. 
+datasources.section.source-talkdesk-explore.api_key.title=API KEY +datasources.section.source-talkdesk-explore.auth_url.title=AUTH URL +datasources.section.source-talkdesk-explore.start_date.title=START DATE +datasources.section.source-talkdesk-explore.timezone.title=TIMEZONE +datasources.section.source-talkdesk-explore.api_key.description=Talkdesk API key. +datasources.section.source-talkdesk-explore.auth_url.description=Talkdesk Auth URL. Only 'client_credentials' auth type supported at the moment. +datasources.section.source-talkdesk-explore.start_date.description=The date from which you'd like to replicate data for Talkdesk Explore API, in the format YYYY-MM-DDT00:00:00. All data generated after this date will be replicated. +datasources.section.source-talkdesk-explore.timezone.description=Timezone to use when generating reports. Only IANA timezones are supported (https://nodatime.org/TimeZones) +datasources.section.source-tempo.api_token.title=API token +datasources.section.source-tempo.api_token.description=Tempo API Token. Go to Tempo>Settings, scroll down to Data Access and select API integration. +datasources.section.source-tidb.database.title=Database +datasources.section.source-tidb.host.title=Host +datasources.section.source-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.source-tidb.password.title=Password +datasources.section.source-tidb.port.title=Port +datasources.section.source-tidb.ssl.title=SSL Connection +datasources.section.source-tidb.username.title=Username +datasources.section.source-tidb.database.description=Name of the database. +datasources.section.source-tidb.host.description=Hostname of the database. +datasources.section.source-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3) +datasources.section.source-tidb.password.description=Password associated with the username. +datasources.section.source-tidb.port.description=Port of the database. +datasources.section.source-tidb.ssl.description=Encrypt data using SSL. +datasources.section.source-tidb.username.description=Username to use to access the database. +datasources.section.source-timely.account_id.title=account_id +datasources.section.source-timely.bearer_token.title=Bearer token +datasources.section.source-timely.start_date.title=startDate +datasources.section.source-timely.account_id.description=Timely account id +datasources.section.source-timely.bearer_token.description=Timely bearer token +datasources.section.source-timely.start_date.description=start date +datasources.section.source-tplcentral.client_id.title=Client ID +datasources.section.source-tplcentral.client_secret.title=Client secret +datasources.section.source-tplcentral.customer_id.title=Customer ID +datasources.section.source-tplcentral.facility_id.title=Facility ID +datasources.section.source-tplcentral.start_date.title=Start date +datasources.section.source-tplcentral.tpl_key.title=3PL GUID +datasources.section.source-tplcentral.url_base.title=URL base +datasources.section.source-tplcentral.user_login.title=User login name +datasources.section.source-tplcentral.user_login_id.title=User login ID +datasources.section.source-tplcentral.start_date.description=Date and time together in RFC 3339 format, for example, 2018-11-13T20:20:39+00:00. 
+datasources.section.source-tplcentral.user_login.description=User login ID and/or name is required +datasources.section.source-tplcentral.user_login_id.description=User login ID and/or name is required +datasources.section.source-trello.board_ids.title=Trello Board IDs +datasources.section.source-trello.key.title=API key +datasources.section.source-trello.start_date.title=Start Date +datasources.section.source-trello.token.title=API token +datasources.section.source-trello.board_ids.description=IDs of the boards to replicate data from. If left empty, data from all boards to which you have access will be replicated. +datasources.section.source-trello.key.description=Trello API key. See the docs for instructions on how to generate it. +datasources.section.source-trello.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-trello.token.description=Trello v API token. See the docs for instructions on how to generate it. +datasources.section.source-twilio.account_sid.title=Account ID +datasources.section.source-twilio.auth_token.title=Auth Token +datasources.section.source-twilio.lookback_window.title=Lookback window +datasources.section.source-twilio.start_date.title=Replication Start Date +datasources.section.source-twilio.account_sid.description=Twilio account SID +datasources.section.source-twilio.auth_token.description=Twilio Auth Token. +datasources.section.source-twilio.lookback_window.description=How far into the past to look for records. (in minutes) +datasources.section.source-twilio.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-typeform.form_ids.title=Form IDs to replicate +datasources.section.source-typeform.start_date.title=Start Date +datasources.section.source-typeform.token.title=API Token +datasources.section.source-typeform.form_ids.description=When this parameter is set, the connector will replicate data only from the input forms. Otherwise, all forms in your Typeform account will be replicated. You can find form IDs in your form URLs. For example, in the URL "https://mysite.typeform.com/to/u6nXL7" the form_id is u6nXL7. You can find form URLs on Share panel +datasources.section.source-typeform.start_date.description=UTC date and time in the format: YYYY-MM-DDTHH:mm:ss[Z]. Any data before this date will not be replicated. +datasources.section.source-typeform.token.description=The API Token for a Typeform account. +datasources.section.source-us-census.api_key.description=Your API Key. Get your key here. +datasources.section.source-us-census.query_params.description=The query parameters portion of the GET request, without the api key +datasources.section.source-us-census.query_path.description=The path portion of the GET request +datasources.section.source-woocommerce.conversion_window_days.title=Conversion Window (Optional) +datasources.section.source-woocommerce.api_key.description=The CUSTOMER KEY for API in WooCommerce shop. +datasources.section.source-woocommerce.api_secret.description=The CUSTOMER SECRET for API in WooCommerce shop. +datasources.section.source-woocommerce.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. +datasources.section.source-woocommerce.shop.description=The name of the store. 
For https://EXAMPLE.com, the shop name is 'EXAMPLE.com'. +datasources.section.source-woocommerce.start_date.description=The date you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-yahoo-finance-price.interval.title=Interval +datasources.section.source-yahoo-finance-price.range.title=Range +datasources.section.source-yahoo-finance-price.interval.description=The interval of between prices queried. +datasources.section.source-yahoo-finance-price.range.description=The range of prices to be queried. +datasources.section.source-yahoo-finance-price.tickers.description=Comma-separated identifiers for the stocks to be queried. Whitespaces are allowed. +datasources.section.source-youtube-analytics.credentials.properties.client_id.title=Client ID +datasources.section.source-youtube-analytics.credentials.properties.client_secret.title=Client Secret +datasources.section.source-youtube-analytics.credentials.properties.refresh_token.title=Refresh Token +datasources.section.source-youtube-analytics.credentials.title=Authenticate via OAuth 2.0 +datasources.section.source-youtube-analytics.credentials.properties.client_id.description=The Client ID of your developer application +datasources.section.source-youtube-analytics.credentials.properties.client_secret.description=The client secret of your developer application +datasources.section.source-youtube-analytics.credentials.properties.refresh_token.description=A refresh token generated using the above client ID and secret +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-zendesk-chat.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-zendesk-chat.credentials.oneOf.1.title=Access Token +datasources.section.source-zendesk-chat.credentials.title=Authorization Method +datasources.section.source-zendesk-chat.start_date.title=Start Date +datasources.section.source-zendesk-chat.subdomain.title=Subdomain (Optional) +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain new Access Token, when it's expired. +datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests. +datasources.section.source-zendesk-chat.start_date.description=The date from which you'd like to replicate data for Zendesk Chat API, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-zendesk-chat.subdomain.description=Required if you access Zendesk Chat from a Zendesk Support subdomain. 
+datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-sunshine.credentials.title=Authorization Method +datasources.section.source-zendesk-sunshine.start_date.title=Start Date +datasources.section.source-zendesk-sunshine.subdomain.title=Subdomain +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.description=Long-term access Token for making authenticated requests. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.description=API Token. See the docs for information on how to generate this key. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account +datasources.section.source-zendesk-sunshine.start_date.description=The date from which you'd like to replicate data for Zendesk Sunshine API, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-zendesk-sunshine.subdomain.description=The subdomain for your Zendesk Account. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-support.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-support.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-support.credentials.title=Authentication * +datasources.section.source-zendesk-support.start_date.title=Start Date +datasources.section.source-zendesk-support.subdomain.title=Subdomain +datasources.section.source-zendesk-support.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-support.start_date.description=The date from which you'd like to replicate data for Zendesk Support API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. 
+datasources.section.source-zendesk-support.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.title=Email +datasources.section.source-zendesk-talk.credentials.oneOf.0.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.title=OAuth2.0 +datasources.section.source-zendesk-talk.credentials.title=Authentication +datasources.section.source-zendesk-talk.start_date.title=Start Date +datasources.section.source-zendesk-talk.subdomain.title=Subdomain +datasources.section.source-zendesk-talk.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.start_date.description=The date from which you'd like to replicate data for Zendesk Talk API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-zendesk-talk.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zenloop.api_token.description=Zenloop API Token. You can get the API token in settings page here +datasources.section.source-zenloop.date_from.description=Zenloop date_from. Format: 2021-10-24T03:30:30Z or 2021-10-24. Leave empty if only data from current data should be synced +datasources.section.source-zenloop.survey_group_id.description=Zenloop Survey Group ID. Can be found by pulling All Survey Groups via SurveyGroups stream. Leave empty to pull answers from all survey groups +datasources.section.source-zenloop.survey_id.description=Zenloop Survey ID. Can be found here. Leave empty to pull answers from all surveys +datasources.section.source-zoho-crm.client_id.title=Client ID +datasources.section.source-zoho-crm.client_secret.title=Client Secret +datasources.section.source-zoho-crm.dc_region.title=Data Center Location +datasources.section.source-zoho-crm.edition.title=Zoho CRM Edition +datasources.section.source-zoho-crm.environment.title=Environment +datasources.section.source-zoho-crm.refresh_token.title=Refresh Token +datasources.section.source-zoho-crm.start_datetime.title=Start Date +datasources.section.source-zoho-crm.client_id.description=OAuth2.0 Client ID +datasources.section.source-zoho-crm.client_secret.description=OAuth2.0 Client Secret +datasources.section.source-zoho-crm.dc_region.description=Please choose the region of your Data Center location. 
More info by this Link
+datasources.section.source-zoho-crm.edition.description=Choose your Edition of Zoho CRM to determine API Concurrency Limits
+datasources.section.source-zoho-crm.environment.description=Please choose the environment
+datasources.section.source-zoho-crm.refresh_token.description=OAuth2.0 Refresh Token
+datasources.section.source-zoho-crm.start_datetime.description=ISO 8601, for instance: `YYYY-MM-DD`, `YYYY-MM-DD HH:MM:SS+HH:MM`
+datasources.section.source-zoom-singer.jwt.title=JWT Token
+datasources.section.source-zoom-singer.jwt.description=Zoom JWT Token. See the docs for more information on how to obtain this key.
+datasources.section.source-zuora.client_id.title=Client ID
+datasources.section.source-zuora.client_secret.title=Client Secret
+datasources.section.source-zuora.data_query.title=Data Query Type
+datasources.section.source-zuora.start_date.title=Start Date
+datasources.section.source-zuora.tenant_endpoint.title=Tenant Endpoint Location
+datasources.section.source-zuora.window_in_days.title=Query Window (in days)
+datasources.section.source-zuora.client_id.description=Your OAuth user Client ID
+datasources.section.source-zuora.client_secret.description=Your OAuth user Client Secret
+datasources.section.source-zuora.data_query.description=Choose between `Live` or `Unlimited` - the optimized, replicated database at 12 hours freshness for high volume extraction. Link
+datasources.section.source-zuora.start_date.description=Start Date in format: YYYY-MM-DD
+datasources.section.source-zuora.tenant_endpoint.description=Please choose the right endpoint where your Tenant is located. More info by this Link
+datasources.section.source-zuora.window_in_days.description=The number of days in each data chunk, beginning from start_date. The bigger the value, the faster the fetch (0.1 is roughly a couple of hours, 1 a day, 364 a year).
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure Blob Storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. May be left empty, in which case a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written.
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
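To make the Standard Inserts vs. GCS Staging choice above concrete, here is a hedged sketch of what the `loading_method` portion of a destination-bigquery configuration could look like with GCS Staging and an HMAC key. The bucket name, path, and key values are placeholders, and the exact field names and enum values should be checked against the connector's spec.json rather than taken from this sketch.

```
# Illustrative (not authoritative) loading_method block for GCS Staging, validated with jq.
cat <<'EOF' | jq .
{
  "loading_method": {
    "method": "GCS Staging",
    "gcs_bucket_name": "my-staging-bucket",
    "gcs_bucket_path": "airbyte/staging",
    "credential": {
      "credential_type": "HMAC_KEY",
      "hmac_key_access_id": "GOOG1EXAMPLEACCESSID",
      "hmac_key_secret": "exampleBase64EncodedSecret"
    }
  }
}
EOF
```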
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here.
+datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly.
+datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here.
+datasources.section.destination-cassandra.address.title=Address
+datasources.section.destination-cassandra.datacenter.title=Datacenter
+datasources.section.destination-cassandra.keyspace.title=Keyspace
+datasources.section.destination-cassandra.password.title=Password
+datasources.section.destination-cassandra.port.title=Port
+datasources.section.destination-cassandra.replication.title=Replication factor
+datasources.section.destination-cassandra.username.title=Username
+datasources.section.destination-cassandra.address.description=Address to connect to.
+datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster.
+datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in.
+datasources.section.destination-cassandra.password.description=Password associated with Cassandra.
+datasources.section.destination-cassandra.port.description=Port of Cassandra.
+datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s). +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave this empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch doc id. Does not support composite primary keys. 
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is a waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=GCS Bucket Path string Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted url. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
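+# Editor's note (illustrative only; the host, port, database and parameter names below are hypothetical and not taken from any Airbyte spec): a JDBC formatted URL of the kind expected by destination-jdbc.jdbc_url typically looks like jdbc:postgresql://db.example.com:5432/mydb, and the 'key=value' pairs accepted by the various jdbc_url_params fields are appended to it as an ampersand-separated query string, e.g. jdbc:postgresql://db.example.com:5432/mydb?connectTimeout=10&ssl=true.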
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
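+# Editor's note (illustrative only; broker hostnames are hypothetical): a destination-kafka.bootstrap_servers value following the host1:port1,host2:port2,... form described above might look like kafka-1.internal:9092,kafka-2.internal:9092; as noted, the list is only used for the initial discovery of the full cluster membership.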
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses, the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab in the left-hand side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow the connector to guess the keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab in the left-hand side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for the current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique to each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
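+# Editor's note (illustrative only; hostnames and the replica set name are hypothetical): for destination-mongodb.instance_type.oneOf.1.properties.server_addresses, the `host`:`port` list described above might look like mongo-1.example.com:27017,mongo-2.example.com:27017,mongo-3.example.com:27017, with the replica_set field set to the replica set name (for example rs0).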
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
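+# Editor's note (illustrative only; the namespace and stream values are hypothetical): for the topic_pattern fields above (destination-kafka and destination-mqtt), a pattern such as '{namespace}.{stream}' with namespace 'public' and stream 'users' would resolve to the topic 'public.users', subject to the standard naming convention mentioned in the descriptions.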
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not add it, the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not add it, the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=Whether the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer. +datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages. +datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions. +datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name. +datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar. +datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms). +datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces. +datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and are spread across clusters. +datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk. +datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-rabbitmq.exchange.description=The exchange name. +datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name. +datasources.section.destination-rabbitmq.password.description=The password to connect. +datasources.section.destination-rabbitmq.port.description=The RabbitMQ port. +datasources.section.destination-rabbitmq.routing_key.description=The routing key. +datasources.section.destination-rabbitmq.ssl.description=SSL enabled. +datasources.section.destination-rabbitmq.username.description=The username to connect. +datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name. +datasources.section.destination-redis.cache_type.title=Cache type +datasources.section.destination-redis.host.title=Host +datasources.section.destination-redis.password.title=Password +datasources.section.destination-redis.port.title=Port +datasources.section.destination-redis.username.title=Username +datasources.section.destination-redis.cache_type.description=Redis cache type to store data in. +datasources.section.destination-redis.host.description=Redis host to connect to. +datasources.section.destination-redis.password.description=Password associated with Redis. +datasources.section.destination-redis.port.description=Port of Redis. 
+datasources.section.destination-redis.username.description=Username associated with Redis. +datasources.section.destination-redshift.database.title=Database +datasources.section.destination-redshift.host.title=Host +datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-redshift.password.title=Password +datasources.section.destination-redshift.port.title=Port +datasources.section.destination-redshift.schema.title=Default Schema +datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key +datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging +datasources.section.destination-redshift.uploading_method.title=Uploading Method +datasources.section.destination-redshift.username.title=Username +datasources.section.destination-redshift.database.description=Name of the database. +datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com) +datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-redshift.password.description=Password associated with the username. +datasources.section.destination-redshift.port.description=Port of the database. +datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-redshift.uploading_method.description=The method how the data will be uploaded to the database. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key. 
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-rockset.api_key.title=Api Key +datasources.section.destination-rockset.api_server.title=Api Server +datasources.section.destination-rockset.workspace.title=Workspace +datasources.section.destination-rockset.api_key.description=Rockset api key +datasources.section.destination-rockset.api_server.description=Rockset api URL +datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created + written to. 
+datasources.section.destination-s3.access_key_id.title=S3 Key ID * +datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec * +datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage 
+datasources.section.destination-s3.format.title=Output Format * +datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional) +datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional) +datasources.section.destination-s3.secret_access_key.title=S3 Access Key * +datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here. +datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-s3.format.description=Format of the data output. See here for more details +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. 
This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here. +datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here +datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes. +datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here +datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here +datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here +datasources.section.destination-scylla.address.title=Address +datasources.section.destination-scylla.keyspace.title=Keyspace +datasources.section.destination-scylla.password.title=Password +datasources.section.destination-scylla.port.title=Port +datasources.section.destination-scylla.replication.title=Replication factor +datasources.section.destination-scylla.username.title=Username +datasources.section.destination-scylla.address.description=Address to connect to. +datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in. +datasources.section.destination-scylla.password.description=Password associated with Scylla. +datasources.section.destination-scylla.port.description=Port of Scylla. +datasources.section.destination-scylla.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-scylla.username.description=Username to use to access Scylla. +datasources.section.destination-sftp-json.destination_path.title=Destination path +datasources.section.destination-sftp-json.host.title=Host +datasources.section.destination-sftp-json.password.title=Password +datasources.section.destination-sftp-json.port.title=Port +datasources.section.destination-sftp-json.username.title=User +datasources.section.destination-sftp-json.destination_path.description=Path to the directory where json files will be written. +datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server. +datasources.section.destination-sftp-json.password.description=Password associated with the username. +datasources.section.destination-sftp-json.port.description=Port of the SFTP server. +datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server. 
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional) +datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password +datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password +datasources.section.destination-snowflake.credentials.title=Authorization Method +datasources.section.destination-snowflake.database.title=Database +datasources.section.destination-snowflake.host.title=Host +datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging +datasources.section.destination-snowflake.loading_method.title=Data Staging Method +datasources.section.destination-snowflake.role.title=Role +datasources.section.destination-snowflake.schema.title=Default Schema +datasources.section.destination-snowflake.username.title=Username +datasources.section.destination-snowflake.warehouse.title=Warehouse +datasources.section.destination-snowflake.credentials.description= +datasources.section.destination-snowflake.credentials.description= +datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter you application's Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key. +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username. +datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into +datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format ...snowflakecomputing.com) +datasources.section.destination-snowflake.jdbc_url_params.description=Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &). 
Example: key1=value1&key2=value2&key3=value3 +datasources.section.destination-snowflake.loading_method.description=Select a data staging method +datasources.section.destination-snowflake.loading_method.oneOf.0.description=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.description=Enter your AWS access key ID. Airbyte requires Read and Write permissions on your S3 bucket +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.description=Choose a data encryption method for the staging data +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.description=Toggle to delete staging files from the S3 bucket after a successful sync +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.description=Enter your S3 bucket name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.description=Enter the region where your S3 bucket resides +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.description=Enter your AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.3.description=Recommended for large production workloads for better speed and scalability. 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.description=Enter the Cloud Storage bucket name +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.description=Enter your Google Cloud service account key in the JSON format with read/write access to your Cloud Storage staging bucket +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.description=Enter the Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.4.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.description=Enter your Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.description=Enter your Azure Blob Storage container name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.description=Enter the Azure Blob Storage endpoint domain name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.description=Enter the Shared access signature (SAS) token to grant Snowflake limited access to objects in your Azure Blob Storage account +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.role.description=Enter the role that you want to use to access Snowflake +datasources.section.destination-snowflake.schema.description=Enter the name of the default schema +datasources.section.destination-snowflake.username.description=Enter the name of the user you want to use to access the database +datasources.section.destination-snowflake.warehouse.description=Enter the name of the warehouse that you want to sync data into +datasources.section.destination-sqlite.destination_path.description=Path to the sqlite.db file. The file will be placed inside that local mount. For more information check out our docs +datasources.section.destination-tidb.database.title=Database +datasources.section.destination-tidb.host.title=Host +datasources.section.destination-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-tidb.password.title=Password +datasources.section.destination-tidb.port.title=Port +datasources.section.destination-tidb.ssl.title=SSL Connection +datasources.section.destination-tidb.username.title=User +datasources.section.destination-tidb.database.description=Name of the database. +datasources.section.destination-tidb.host.description=Hostname of the database. +datasources.section.destination-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-tidb.password.description=Password associated with the username. +datasources.section.destination-tidb.port.description=Port of the database. 
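Several connectors above (destination-redshift, destination-snowflake, destination-tidb) accept a jdbc_url_params value formatted as 'key=value' pairs joined with '&'. A minimal sketch of assembling such a string; the individual JDBC parameters shown are only examples, not recommended settings.

```
#!/bin/bash
# Build a jdbc_url_params string in the 'key1=value1&key2=value2' form the
# descriptions above call for (bash 4+ associative array; params are examples).
declare -A PARAMS=(
  [connectTimeout]="30000"
  [socketTimeout]="60000"
  [tcpKeepAlive]="true"
)

JDBC_URL_PARAMS=""
for key in "${!PARAMS[@]}"; do
  JDBC_URL_PARAMS+="${key}=${PARAMS[$key]}&"
done
JDBC_URL_PARAMS="${JDBC_URL_PARAMS%&}"   # trim the trailing '&'
echo "$JDBC_URL_PARAMS"
```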
+datasources.section.destination-tidb.ssl.description=Encrypt data using SSL. +datasources.section.destination-tidb.username.description=Username to use to access the database. +datasources.section.source-airtable.api_key.title=API Key +datasources.section.source-airtable.base_id.title=Base ID +datasources.section.source-airtable.tables.title=Tables +datasources.section.source-airtable.api_key.description=The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key. +datasources.section.source-airtable.base_id.description=The Base ID to integrate the data from. You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs. +datasources.section.source-airtable.tables.description=The list of Tables to integrate. +datasources.section.source-amazon-ads.auth_type.title=Auth Type +datasources.section.source-amazon-ads.client_id.title=Client ID +datasources.section.source-amazon-ads.client_secret.title=Client Secret +datasources.section.source-amazon-ads.profiles.title=Profile IDs (Optional) +datasources.section.source-amazon-ads.refresh_token.title=Refresh Token +datasources.section.source-amazon-ads.region.title=Region * +datasources.section.source-amazon-ads.report_generation_max_retries.title=Report Generation Maximum Retries * +datasources.section.source-amazon-ads.report_wait_timeout.title=Report Wait Timeout * +datasources.section.source-amazon-ads.start_date.title=Start Date (Optional) +datasources.section.source-amazon-ads.client_id.description=The client ID of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.client_secret.description=The client secret of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.profiles.description=Profile IDs you want to fetch data for. See docs for more details. +datasources.section.source-amazon-ads.refresh_token.description=Amazon Ads refresh token. See the docs for more information on how to obtain this token. +datasources.section.source-amazon-ads.region.description=Region to pull data from (EU/NA/FE). See docs for more details. +datasources.section.source-amazon-ads.report_generation_max_retries.description=Maximum retries Airbyte will attempt for fetching report data. Default is 5. +datasources.section.source-amazon-ads.report_wait_timeout.description=Timeout duration in minutes for Reports. Default is 30 minutes. +datasources.section.source-amazon-ads.start_date.description=The Start date for collecting reports, should not be more than 60 days in the past. 
In YYYY-MM-DD format +datasources.section.source-amazon-seller-partner.app_id.title=App ID * +datasources.section.source-amazon-seller-partner.auth_type.title=Auth Type +datasources.section.source-amazon-seller-partner.aws_access_key.title=AWS Access Key +datasources.section.source-amazon-seller-partner.aws_environment.title=AWS Environment +datasources.section.source-amazon-seller-partner.aws_secret_key.title=AWS Secret Access Key +datasources.section.source-amazon-seller-partner.lwa_app_id.title=LWA Client ID +datasources.section.source-amazon-seller-partner.lwa_client_secret.title=LWA Client Secret +datasources.section.source-amazon-seller-partner.max_wait_seconds.title=Max wait time for reports (in seconds) +datasources.section.source-amazon-seller-partner.period_in_days.title=Period In Days +datasources.section.source-amazon-seller-partner.refresh_token.title=Refresh Token +datasources.section.source-amazon-seller-partner.region.title=AWS Region +datasources.section.source-amazon-seller-partner.replication_end_date.title=End Date +datasources.section.source-amazon-seller-partner.replication_start_date.title=Start Date +datasources.section.source-amazon-seller-partner.report_options.title=Report Options +datasources.section.source-amazon-seller-partner.role_arn.title=Role ARN +datasources.section.source-amazon-seller-partner.app_id.description=Your Amazon App ID +datasources.section.source-amazon-seller-partner.aws_access_key.description=Specifies the AWS access key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.aws_environment.description=An enumeration. +datasources.section.source-amazon-seller-partner.aws_secret_key.description=Specifies the AWS secret key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.lwa_app_id.description=Your Login with Amazon Client ID. +datasources.section.source-amazon-seller-partner.lwa_client_secret.description=Your Login with Amazon Client Secret. +datasources.section.source-amazon-seller-partner.max_wait_seconds.description=Sometimes a report can take up to 30 minutes to generate. This will set the limit for how long to wait for a successful report. +datasources.section.source-amazon-seller-partner.period_in_days.description=Will be used for stream slicing for initial full_refresh sync when no updated state is present for reports that support sliced incremental sync. +datasources.section.source-amazon-seller-partner.refresh_token.description=The Refresh Token obtained via OAuth flow authorization. +datasources.section.source-amazon-seller-partner.region.description=An enumeration. +datasources.section.source-amazon-seller-partner.replication_end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. +datasources.section.source-amazon-seller-partner.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-amazon-seller-partner.report_options.description=Additional information passed to reports. This varies by report type. Must be a valid JSON string. +datasources.section.source-amazon-seller-partner.role_arn.description=Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations requested using this profile. (Needs permission to 'Assume Role' STS). 
+datasources.section.source-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.source-amazon-sqs.attributes_to_return.title=Message Attributes To Return +datasources.section.source-amazon-sqs.delete_messages.title=Delete Messages After Read +datasources.section.source-amazon-sqs.max_batch_size.title=Max Batch Size +datasources.section.source-amazon-sqs.max_wait_time.title=Max Wait Time +datasources.section.source-amazon-sqs.queue_url.title=Queue URL +datasources.section.source-amazon-sqs.region.title=AWS Region +datasources.section.source-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.source-amazon-sqs.visibility_timeout.title=Message Visibility Timeout +datasources.section.source-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.attributes_to_return.description=Comma-separated list of Message Attribute names to return +datasources.section.source-amazon-sqs.delete_messages.description=If Enabled, messages will be deleted from the SQS Queue after being read. If Disabled, messages are left in the queue and can be read more than once. WARNING: Enabling this option can result in data loss in cases of failure, use with caution, see documentation for more detail. +datasources.section.source-amazon-sqs.max_batch_size.description=Maximum number of messages to get in one batch (10 max) +datasources.section.source-amazon-sqs.max_wait_time.description=Maximum amount of time in seconds to wait for messages in a single poll (20 max) +datasources.section.source-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.source-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.source-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.visibility_timeout.description=Modify the Visibility Timeout of the individual message from the Queue's default (seconds). +datasources.section.source-amplitude.api_key.title=API Key +datasources.section.source-amplitude.secret_key.title=Secret Key +datasources.section.source-amplitude.start_date.title=Replication Start Date +datasources.section.source-amplitude.api_key.description=Amplitude API Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.secret_key.description=Amplitude Secret Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-apify-dataset.clean.title=Clean +datasources.section.source-apify-dataset.datasetId.title=Dataset ID +datasources.section.source-apify-dataset.clean.description=If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false. +datasources.section.source-apify-dataset.datasetId.description=ID of the dataset you would like to load to Airbyte. +datasources.section.source-appsflyer.api_token.description=Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard. +datasources.section.source-appsflyer.app_id.description=App identifier as found in AppsFlyer. 
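The source-amazon-sqs options above map closely onto a plain SQS receive call. A hedged sketch with the AWS CLI showing where max_batch_size (10 max), max_wait_time (20 s max), visibility_timeout and attributes_to_return land; the queue URL and region are placeholders.

```
#!/bin/bash
# Sketch only: pull one batch of messages in line with the source-amazon-sqs
# options above. Queue URL and region are placeholders.
QUEUE_URL="https://sqs.eu-west-1.amazonaws.com/123456789012/example-queue"

# --max-number-of-messages  ~ max_batch_size   (10 is the SQS maximum)
# --wait-time-seconds       ~ max_wait_time    (20 is the SQS maximum)
# --visibility-timeout      ~ visibility_timeout override
# --message-attribute-names ~ attributes_to_return
aws sqs receive-message \
  --region eu-west-1 \
  --queue-url "$QUEUE_URL" \
  --max-number-of-messages 10 \
  --wait-time-seconds 20 \
  --visibility-timeout 30 \
  --message-attribute-names All
```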
+datasources.section.source-appsflyer.start_date.description=The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days. +datasources.section.source-appsflyer.timezone.description=Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console. +datasources.section.source-appstore-singer.issuer_id.title=Issuer ID +datasources.section.source-appstore-singer.key_id.title=Key ID +datasources.section.source-appstore-singer.private_key.title=Private Key +datasources.section.source-appstore-singer.start_date.title=Start Date +datasources.section.source-appstore-singer.vendor.title=Vendor ID +datasources.section.source-appstore-singer.issuer_id.description=Appstore Issuer ID. See the docs for more information on how to obtain this ID. +datasources.section.source-appstore-singer.key_id.description=Appstore Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.private_key.description=Appstore Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-appstore-singer.vendor.description=Appstore Vendor ID. See the docs for more information on how to obtain this ID. +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.title=Personal Access Token +datasources.section.source-asana.credentials.oneOf.0.title=Authenticate with Personal Access Token +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title= +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title= +datasources.section.source-asana.credentials.oneOf.1.title=Authenticate via Asana (OAuth) +datasources.section.source-asana.credentials.title=Authentication mechanism +datasources.section.source-asana.credentials.description=Choose how to authenticate to Asana +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.description=PAT Credentials +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.description=Asana Personal Access Token (generate yours here). 
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.description=OAuth Credentials +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description= +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description= +datasources.section.source-aws-cloudtrail.aws_key_id.title=Key ID +datasources.section.source-aws-cloudtrail.aws_region_name.title=Region Name +datasources.section.source-aws-cloudtrail.aws_secret_key.title=Secret Key +datasources.section.source-aws-cloudtrail.start_date.title=Start Date +datasources.section.source-aws-cloudtrail.aws_key_id.description=AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.aws_region_name.description=The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name. +datasources.section.source-aws-cloudtrail.aws_secret_key.description=AWS CloudTrail Secret Key. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.start_date.description=The date you would like to replicate data. Data in AWS CloudTrail is available for the last 90 days only. Format: YYYY-MM-DD. +datasources.section.source-azure-table.storage_access_key.title=Access Key +datasources.section.source-azure-table.storage_account_name.title=Account Name +datasources.section.source-azure-table.storage_endpoint_suffix.title=Endpoint Suffix +datasources.section.source-azure-table.storage_access_key.description=Azure Table Storage Access Key. See the docs for more information on how to obtain this key. +datasources.section.source-azure-table.storage_account_name.description=The name of your storage account. +datasources.section.source-azure-table.storage_endpoint_suffix.description=Azure Table Storage service account URL suffix. See the docs for more information on how to obtain the endpoint suffix +datasources.section.source-bamboo-hr.api_key.description=API key of BambooHR +datasources.section.source-bamboo-hr.custom_reports_fields.description=Comma-separated list of fields to include in custom reports. +datasources.section.source-bamboo-hr.custom_reports_include_default_fields.description=If true, the custom reports endpoint will include the default fields defined here: https://documentation.bamboohr.com/docs/list-of-field-names. +datasources.section.source-bamboo-hr.subdomain.description=Subdomain of BambooHR +datasources.section.source-bigcommerce.access_token.title=Access Token +datasources.section.source-bigcommerce.start_date.title=Start Date +datasources.section.source-bigcommerce.store_hash.title=Store Hash +datasources.section.source-bigcommerce.access_token.description=Access Token for making authenticated requests. +datasources.section.source-bigcommerce.start_date.description=The date you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-bigcommerce.store_hash.description=The hash code of the store. For https://api.bigcommerce.com/stores/HASH_CODE/v3/, the store's hash code is 'HASH_CODE'. 
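The source-bigcommerce store_hash above is just the path segment after /stores/ in the API base URL. A small sketch of pulling it out of such a URL; the URL itself is a placeholder.

```
#!/bin/bash
# Extract the BigCommerce store hash from an API base URL of the form
# https://api.bigcommerce.com/stores/HASH_CODE/v3/ (placeholder URL below).
API_URL="https://api.bigcommerce.com/stores/abc123xyz/v3/"
STORE_HASH=$(echo "$API_URL" | sed -E 's#.*/stores/([^/]+)/.*#\1#')
echo "$STORE_HASH"   # prints: abc123xyz
```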
+datasources.section.source-bigquery.credentials_json.title=Credentials JSON +datasources.section.source-bigquery.dataset_id.title=Default Dataset ID +datasources.section.source-bigquery.project_id.title=Project ID +datasources.section.source-bigquery.credentials_json.description=The contents of your Service Account Key JSON file. See the docs for more information on how to obtain this key. +datasources.section.source-bigquery.dataset_id.description=The dataset ID to search for tables and views. If you are only loading data from one dataset, setting this option could result in much faster schema discovery. +datasources.section.source-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. +datasources.section.source-bing-ads.client_id.title=Client ID +datasources.section.source-bing-ads.client_secret.title=Client Secret +datasources.section.source-bing-ads.developer_token.title=Developer Token +datasources.section.source-bing-ads.refresh_token.title=Refresh Token +datasources.section.source-bing-ads.reports_start_date.title=Reports replication start date +datasources.section.source-bing-ads.tenant_id.title=Tenant ID +datasources.section.source-bing-ads.client_id.description=The Client ID of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.client_secret.description=The Client Secret of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.developer_token.description=Developer token associated with user. See more info in the docs. +datasources.section.source-bing-ads.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-bing-ads.reports_start_date.description=The start date from which to begin replicating report data. Any data generated before this date will not be replicated in reports. This is a UTC date in YYYY-MM-DD format. +datasources.section.source-bing-ads.tenant_id.description=The Tenant ID of your Microsoft Advertising developer application. Set this to "common" unless you know you need a different value. +datasources.section.source-braintree.environment.title=Environment +datasources.section.source-braintree.merchant_id.title=Merchant ID +datasources.section.source-braintree.private_key.title=Private Key +datasources.section.source-braintree.public_key.title=Public Key +datasources.section.source-braintree.start_date.title=Start Date +datasources.section.source-braintree.environment.description=Environment specifies where the data will come from. +datasources.section.source-braintree.merchant_id.description=The unique identifier for your entire gateway account. See the docs for more information on how to obtain this ID. +datasources.section.source-braintree.private_key.description=Braintree Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.public_key.description=Braintree Public Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-cart.access_token.title=Access Token +datasources.section.source-cart.start_date.title=Start Date +datasources.section.source-cart.store_name.title=Store Name +datasources.section.source-cart.access_token.description=Access Token for making authenticated requests. 
+datasources.section.source-cart.start_date.description=The date from which you'd like to replicate the data +datasources.section.source-cart.store_name.description=The name of Cart.com Online Store. All API URLs start with https://[mystorename.com]/api/v1/, where [mystorename.com] is the domain name of your store. +datasources.section.source-chargebee.product_catalog.title=Product Catalog +datasources.section.source-chargebee.site.title=Site +datasources.section.source-chargebee.site_api_key.title=API Key +datasources.section.source-chargebee.start_date.title=Start Date +datasources.section.source-chargebee.product_catalog.description=Product Catalog version of your Chargebee site. Instructions on how to find your version you may find here under `API Version` section. +datasources.section.source-chargebee.site.description=The site prefix for your Chargebee instance. +datasources.section.source-chargebee.site_api_key.description=Chargebee API Key. See the docs for more information on how to obtain this key. +datasources.section.source-chargebee.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-chargify.api_key.description=Chargify API Key. +datasources.section.source-chargify.domain.description=Chargify domain. Normally this domain follows the following format companyname.chargify.com +datasources.section.source-chartmogul.api_key.description=Chartmogul API key +datasources.section.source-chartmogul.interval.description=Some APIs such as Metrics require intervals to cluster data. +datasources.section.source-chartmogul.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. When feasible, any data before this date will not be replicated. +datasources.section.source-clickhouse.database.title=Database +datasources.section.source-clickhouse.host.title=Host +datasources.section.source-clickhouse.password.title=Password +datasources.section.source-clickhouse.port.title=Port +datasources.section.source-clickhouse.ssl.title=SSL Connection +datasources.section.source-clickhouse.username.title=Username +datasources.section.source-clickhouse.database.description=The name of the database. +datasources.section.source-clickhouse.host.description=The host endpoint of the Clickhouse cluster. +datasources.section.source-clickhouse.password.description=The password associated with this username. +datasources.section.source-clickhouse.port.description=The port of the database. +datasources.section.source-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.source-clickhouse.username.description=The username which is used to access the database. +datasources.section.source-close-com.api_key.description=Close.com API key (usually starts with 'api_'; find yours here). +datasources.section.source-close-com.start_date.description=The start date to sync data. Leave blank for full sync. Format: YYYY-MM-DD. +datasources.section.source-cockroachdb.database.title=DB Name +datasources.section.source-cockroachdb.host.title=Host +datasources.section.source-cockroachdb.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-cockroachdb.password.title=Password +datasources.section.source-cockroachdb.port.title=Port +datasources.section.source-cockroachdb.ssl.title=Connect using SSL +datasources.section.source-cockroachdb.username.title=User +datasources.section.source-cockroachdb.database.description=Name of the database. 
+datasources.section.source-cockroachdb.host.description=Hostname of the database. +datasources.section.source-cockroachdb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-cockroachdb.password.description=Password associated with the username. +datasources.section.source-cockroachdb.port.description=Port of the database. +datasources.section.source-cockroachdb.ssl.description=Encrypt client/server communications for increased security. +datasources.section.source-cockroachdb.username.description=Username to use to access the database. +datasources.section.source-commercetools.client_id.description=ID of the API Client. +datasources.section.source-commercetools.client_secret.description=The password (secret) of the API Client. +datasources.section.source-commercetools.host.description=The cloud provider where your shop is hosted. See: https://docs.commercetools.com/api/authorization +datasources.section.source-commercetools.project_key.description=The project key +datasources.section.source-commercetools.region.description=The region of the platform. +datasources.section.source-commercetools.start_date.description=The date you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-confluence.api_token.description=Please follow the Atlassian documentation for generating an API token: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/ +datasources.section.source-confluence.domain_name.description=Your Confluence domain name +datasources.section.source-confluence.email.description=Your Confluence login email +datasources.section.source-db2.encryption.oneOf.0.title=Unencrypted +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.title=Key Store Password. This field is optional. If you do not fill in this field, the password will be randomly generated. +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.title=SSL PEM file +datasources.section.source-db2.encryption.oneOf.1.title=TLS Encrypted (verify certificate) +datasources.section.source-db2.encryption.title=Encryption +datasources.section.source-db2.jdbc_url_params.title=JDBC URL Params +datasources.section.source-db2.db.description=Name of the database. +datasources.section.source-db2.encryption.description=Encryption method to use when communicating with the database +datasources.section.source-db2.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-db2.encryption.oneOf.1.description=Verify and use the cert provided by the server. +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.description=Key Store Password +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations +datasources.section.source-db2.host.description=Host of the Db2. +datasources.section.source-db2.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-db2.password.description=Password associated with the username. 
+datasources.section.source-db2.port.description=Port of the database. +datasources.section.source-db2.username.description=Username to use to access the database. +datasources.section.source-delighted.api_key.title=Delighted API Key +datasources.section.source-delighted.since.title=Since +datasources.section.source-delighted.api_key.description=A Delighted API key. +datasources.section.source-delighted.since.description=The date from which you'd like to replicate the data +datasources.section.source-dixa.api_token.description=Dixa API token +datasources.section.source-dixa.batch_size.description=Number of days to batch into one request. Max 31. +datasources.section.source-dixa.start_date.description=The connector pulls records updated from this date onwards. +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-drift.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.1.title=Access Token +datasources.section.source-drift.credentials.title=Authorization Method +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.description=The Client ID of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.description=Drift Access Token. See the docs for more information on how to generate this key. 
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.source-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.source-elasticsearch.endpoint.title=Server Endpoint +datasources.section.source-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.source-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.source-facebook-marketing.access_token.title=Access Token +datasources.section.source-facebook-marketing.account_id.title=Account ID +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.title=ValidActionBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.title=Action Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.title=ValidBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.title=Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.title=End Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.title=ValidEnums +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.title=Fields +datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.title=Custom Insights Lookback Window +datasources.section.source-facebook-marketing.custom_insights.items.properties.name.title=Name +datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.title=Start Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.title=Time Increment 
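The source-elasticsearch authenticationMethod options above correspond to standard Elasticsearch HTTP auth schemes. A hedged curl sketch of the API key/secret and username/password variants; the endpoint and all credentials are placeholders.

```
#!/bin/bash
# Placeholders throughout; shows the two authenticated variants of the
# source-elasticsearch authenticationMethod options above.
ENDPOINT="https://elasticsearch.example.com:9200"

# Api Key/Secret: Elasticsearch expects "ApiKey base64(<apiKeyId>:<apiKeySecret>)".
API_KEY_ID="my-key-id"
API_KEY_SECRET="my-key-secret"
curl -s -H "Authorization: ApiKey $(printf '%s:%s' "$API_KEY_ID" "$API_KEY_SECRET" | base64)" \
  "$ENDPOINT/_cluster/health"

# Username/Password: plain HTTP basic auth.
curl -s -u "elastic:changeme" "$ENDPOINT/_cluster/health"
```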
+datasources.section.source-facebook-marketing.custom_insights.items.title=InsightConfig
+datasources.section.source-facebook-marketing.custom_insights.title=Custom Insights
+datasources.section.source-facebook-marketing.end_date.title=End Date
+datasources.section.source-facebook-marketing.fetch_thumbnail_images.title=Fetch Thumbnail Images
+datasources.section.source-facebook-marketing.include_deleted.title=Include Deleted
+datasources.section.source-facebook-marketing.insights_lookback_window.title=Insights Lookback Window
+datasources.section.source-facebook-marketing.max_batch_size.title=Maximum size of Batched Requests
+datasources.section.source-facebook-marketing.page_size.title=Page Size of Requests
+datasources.section.source-facebook-marketing.start_date.title=Start Date
+datasources.section.source-facebook-marketing.access_token.description=The value of the access token generated. See the docs for more information
+datasources.section.source-facebook-marketing.account_id.description=The Facebook Ad account ID to use when pulling data from the Facebook Marketing API.
+datasources.section.source-facebook-marketing.custom_insights.description=A list which contains insights entries; each entry must have a name and can contain fields, breakdowns or action_breakdowns.
+datasources.section.source-facebook-marketing.custom_insights.items.description=Config for custom insights
+datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.description=A list of chosen action_breakdowns for action_breakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.description=Generic enumeration.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.description=A list of chosen breakdowns for breakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.description=Generic enumeration.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.description=The date until which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. All data generated between the start date and this date will be replicated. Not setting this option will result in always syncing the latest data.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.description=A list of chosen fields for the fields parameter
+datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.description=Generic enumeration.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.description=The attribution window
+datasources.section.source-facebook-marketing.custom_insights.items.properties.name.description=The name value of insight
+datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.description=The date from which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.description=Time window in days by which to aggregate statistics. The sync will be chunked into N day intervals, where N is the number of days you specified. For example, if you set this value to 7, then all statistics will be reported as 7-day aggregates by starting from the start_date.
If the start and end dates are October 1st and October 30th, then the connector will output 5 records: 01 - 06, 07 - 13, 14 - 20, 21 - 27, and 28 - 30 (3 days only). +datasources.section.source-facebook-marketing.end_date.description=The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.fetch_thumbnail_images.description=In each Ad Creative, fetch the thumbnail_url and store the result in thumbnail_data_url +datasources.section.source-facebook-marketing.include_deleted.description=Include data from deleted Campaigns, Ads, and AdSets +datasources.section.source-facebook-marketing.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.max_batch_size.description=Maximum batch size used when sending batch requests to Facebook API. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.page_size.description=Page size used when sending requests to Facebook API to specify number of records per page when response has pagination. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.start_date.description=The date from which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-facebook-pages.access_token.title=Page Access Token +datasources.section.source-facebook-pages.page_id.title=Page ID +datasources.section.source-facebook-pages.access_token.description=Facebook Page Access Token +datasources.section.source-facebook-pages.page_id.description=Page ID +datasources.section.source-faker.count.title=Count +datasources.section.source-faker.records_per_slice.title=Records Per Stream Slice +datasources.section.source-faker.records_per_sync.title=Records Per Sync +datasources.section.source-faker.seed.title=Seed +datasources.section.source-faker.count.description=How many users should be generated in total. This setting does not apply to the purchases or products stream. +datasources.section.source-faker.records_per_slice.description=How many fake records will be in each page (stream slice), before a state message is emitted? +datasources.section.source-faker.records_per_sync.description=How many fake records will be returned for each sync, for each stream? By default, it will take 2 syncs to create the requested 1000 records. 
+datasources.section.source-faker.seed.description=Manually control the faker random seed to return the same values on subsequent runs (leave -1 for random) +datasources.section.source-file.dataset_name.title=Dataset Name +datasources.section.source-file.format.title=File Format +datasources.section.source-file.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.4.properties.user.title=User +datasources.section.source-file.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.5.properties.user.title=User +datasources.section.source-file.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.6.properties.user.title=User +datasources.section.source-file.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file.provider.oneOf.7.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.7.title=Local Filesystem (limited) +datasources.section.source-file.provider.title=Storage Provider +datasources.section.source-file.reader_options.title=Reader Options +datasources.section.source-file.url.title=URL +datasources.section.source-file.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details. 
+datasources.section.source-file.provider.oneOf.4.properties.host.description=
+datasources.section.source-file.provider.oneOf.4.properties.password.description=
+datasources.section.source-file.provider.oneOf.4.properties.port.description=
+datasources.section.source-file.provider.oneOf.4.properties.user.description=
+datasources.section.source-file.provider.oneOf.5.properties.host.description=
+datasources.section.source-file.provider.oneOf.5.properties.password.description=
+datasources.section.source-file.provider.oneOf.5.properties.port.description=
+datasources.section.source-file.provider.oneOf.5.properties.user.description=
+datasources.section.source-file.provider.oneOf.6.properties.host.description=
+datasources.section.source-file.provider.oneOf.6.properties.password.description=
+datasources.section.source-file.provider.oneOf.6.properties.port.description=
+datasources.section.source-file.provider.oneOf.6.properties.user.description=
+datasources.section.source-file.provider.oneOf.7.properties.storage.description=WARNING: Note that the local storage URL available for reading must start with the local mount "/local/" at the moment until we implement more advanced docker mounting options.
+datasources.section.source-file.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior.
+datasources.section.source-file.url.description=The URL path to access the file which should be replicated.
+datasources.section.source-file-secure.dataset_name.title=Dataset Name +datasources.section.source-file-secure.format.title=File Format +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file-secure.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file-secure.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file-secure.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file-secure.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file-secure.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file-secure.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.5.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file-secure.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.6.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file-secure.provider.title=Storage Provider +datasources.section.source-file-secure.reader_options.title=Reader Options +datasources.section.source-file-secure.url.title=URL +datasources.section.source-file-secure.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file-secure.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file-secure.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details. 
+datasources.section.source-file-secure.provider.oneOf.4.properties.host.description=
+datasources.section.source-file-secure.provider.oneOf.4.properties.password.description=
+datasources.section.source-file-secure.provider.oneOf.4.properties.port.description=
+datasources.section.source-file-secure.provider.oneOf.4.properties.user.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.host.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.password.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.port.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.user.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.host.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.password.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.port.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.user.description=
+datasources.section.source-file-secure.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior.
+datasources.section.source-file-secure.url.description=The URL path to access the file which should be replicated.
+datasources.section.source-firebolt.account.title=Account
+datasources.section.source-firebolt.database.title=Database
+datasources.section.source-firebolt.engine.title=Engine
+datasources.section.source-firebolt.host.title=Host
+datasources.section.source-firebolt.password.title=Password
+datasources.section.source-firebolt.username.title=Username
+datasources.section.source-firebolt.account.description=Firebolt account to login.
+datasources.section.source-firebolt.database.description=The database to connect to.
+datasources.section.source-firebolt.engine.description=Engine name or url to connect to.
+datasources.section.source-firebolt.host.description=The host name of your Firebolt database.
+datasources.section.source-firebolt.password.description=Firebolt password.
+datasources.section.source-firebolt.username.description=Firebolt email address you use to login.
+datasources.section.source-flexport.api_key.title=API Key
+datasources.section.source-flexport.start_date.title=Start Date
+datasources.section.source-freshcaller.api_key.title=API Key
+datasources.section.source-freshcaller.domain.title=Domain for Freshcaller account
+datasources.section.source-freshcaller.requests_per_minute.title=Requests per minute
+datasources.section.source-freshcaller.start_date.title=Start Date
+datasources.section.source-freshcaller.sync_lag_minutes.title=Lag in minutes for each sync
+datasources.section.source-freshcaller.api_key.description=Freshcaller API Key. See the docs for more information on how to obtain this key.
+datasources.section.source-freshcaller.domain.description=Used to construct Base URL for the Freshcaller APIs
+datasources.section.source-freshcaller.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account.
+datasources.section.source-freshcaller.start_date.description=UTC date and time. Any data created after this date will be replicated.
+datasources.section.source-freshcaller.sync_lag_minutes.description=Lag in minutes for each sync, i.e., at time T, data for the time range [prev_sync_time, T-30] will be fetched
+datasources.section.source-freshdesk.api_key.title=API Key
+datasources.section.source-freshdesk.domain.title=Domain
+datasources.section.source-freshdesk.requests_per_minute.title=Requests per minute
+datasources.section.source-freshdesk.start_date.title=Start Date
+datasources.section.source-freshdesk.api_key.description=Freshdesk API Key. See the docs for more information on how to obtain this key.
+datasources.section.source-freshdesk.domain.description=Freshdesk domain
+datasources.section.source-freshdesk.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account.
+datasources.section.source-freshdesk.start_date.description=UTC date and time. Any data created after this date will be replicated. If this parameter is not set, all data will be replicated.
+datasources.section.source-freshsales.api_key.title=API Key
+datasources.section.source-freshsales.domain_name.title=Domain Name
+datasources.section.source-freshsales.api_key.description=Freshsales API Key. See here. The key is case sensitive.
+datasources.section.source-freshsales.domain_name.description=The name of your Freshsales domain
+datasources.section.source-freshservice.api_key.title=API Key
+datasources.section.source-freshservice.domain_name.title=Domain Name
+datasources.section.source-freshservice.start_date.title=Start Date
+datasources.section.source-freshservice.api_key.description=Freshservice API Key. See here. The key is case sensitive.
+datasources.section.source-freshservice.domain_name.description=The name of your Freshservice domain
+datasources.section.source-freshservice.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-github.branch.title=Branch (Optional)
+datasources.section.source-github.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-github.credentials.oneOf.0.title=OAuth
+datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.title=Personal Access Tokens
+datasources.section.source-github.credentials.oneOf.1.title=Personal Access Token
+datasources.section.source-github.credentials.title=Authentication *
+datasources.section.source-github.page_size_for_large_streams.title=Page size for large streams (Optional)
+datasources.section.source-github.repository.title=GitHub Repositories
+datasources.section.source-github.start_date.title=Start date
+datasources.section.source-github.branch.description=Space-delimited list of GitHub repository branches to pull commits for, e.g. `airbytehq/airbyte/master`. If no branches are specified for a repository, the default branch will be pulled.
+datasources.section.source-github.credentials.description=Choose how to authenticate to GitHub
+datasources.section.source-github.credentials.oneOf.0.properties.access_token.description=OAuth access token
+datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.description=Log into GitHub and then generate a personal access token. To load balance your API quota consumption across multiple API tokens, input multiple tokens separated with ","
+datasources.section.source-github.page_size_for_large_streams.description=The GitHub connector contains several streams with a large amount of data. The page size of such streams depends on the size of your repository. We recommend that you specify values between 10 and 30.
+datasources.section.source-github.repository.description=Space-delimited list of GitHub organizations/repositories, e.g. `airbytehq/airbyte` for a single repository, `airbytehq/*` to get all repositories from an organization, and `airbytehq/airbyte airbytehq/another-repo` for multiple repositories.
+datasources.section.source-github.start_date.description=The date from which you'd like to replicate data from GitHub in the format YYYY-MM-DDT00:00:00Z. For the streams which support this configuration, only data generated on or after the start date will be replicated. This field doesn't apply to all streams; see the docs for more info
+datasources.section.source-gitlab.api_url.title=API URL
+datasources.section.source-gitlab.groups.title=Groups
+datasources.section.source-gitlab.private_token.title=Private Token
+datasources.section.source-gitlab.projects.title=Projects
+datasources.section.source-gitlab.start_date.title=Start Date
+datasources.section.source-gitlab.api_url.description=Please enter the base URL of your GitLab instance.
+datasources.section.source-gitlab.groups.description=Space-delimited list of groups. e.g. airbyte.io.
+datasources.section.source-gitlab.private_token.description=Log into your GitLab account and then generate a personal Access Token.
+datasources.section.source-gitlab.projects.description=Space-delimited list of projects. e.g. airbyte.io/documentation meltano/tap-gitlab.
+datasources.section.source-gitlab.start_date.description=The date from which you'd like to replicate data for GitLab API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.
+datasources.section.source-google-ads.conversion_window_days.title=Conversion Window (Optional)
+datasources.section.source-google-ads.credentials.properties.access_token.title=Access Token (Optional)
+datasources.section.source-google-ads.credentials.properties.client_id.title=Client ID
+datasources.section.source-google-ads.credentials.properties.client_secret.title=Client Secret
+datasources.section.source-google-ads.credentials.properties.developer_token.title=Developer Token
+datasources.section.source-google-ads.credentials.properties.refresh_token.title=Refresh Token
+datasources.section.source-google-ads.credentials.title=Google Credentials
+datasources.section.source-google-ads.custom_queries.items.properties.query.title=Custom Query
+datasources.section.source-google-ads.custom_queries.items.properties.table_name.title=Destination Table Name
+datasources.section.source-google-ads.custom_queries.title=Custom GAQL Queries (Optional)
+datasources.section.source-google-ads.customer_id.title=Customer ID(s)
+datasources.section.source-google-ads.end_date.title=End Date (Optional)
+datasources.section.source-google-ads.login_customer_id.title=Login Customer ID for Managed Accounts (Optional)
+datasources.section.source-google-ads.start_date.title=Start Date
+datasources.section.source-google-ads.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. For more information, see Google's documentation.
+datasources.section.source-google-ads.credentials.description=
+datasources.section.source-google-ads.credentials.properties.access_token.description=Access Token for making authenticated requests. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.credentials.properties.client_id.description=The Client ID of your Google Ads developer application. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.credentials.properties.client_secret.description=The Client Secret of your Google Ads developer application. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.credentials.properties.developer_token.description=Developer token granted by Google to use their APIs. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.credentials.properties.refresh_token.description=The token for obtaining a new access token. More instruction on how to find this value in our docs
+datasources.section.source-google-ads.custom_queries.description=
+datasources.section.source-google-ads.custom_queries.items.properties.query.description=A custom defined GAQL query for building the report. Should not contain a segments.date expression because it is used by incremental streams. See Google's query builder for more information.
+datasources.section.source-google-ads.custom_queries.items.properties.table_name.description=The table name in your destination database for the chosen query.
+datasources.section.source-google-ads.customer_id.description=Comma separated list of (client) customer IDs. Each customer ID must be specified as a 10-digit number without dashes. More instruction on how to find this value in our docs.
Metrics streams like AdGroupAdReport cannot be requested for a manager account. +datasources.section.source-google-ads.end_date.description=UTC date and time in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-google-ads.login_customer_id.description=If your access to the customer account is through a manager account, this field is required and must be set to the customer ID of the manager account (10-digit number without dashes). More information about this field you can see here +datasources.section.source-google-ads.start_date.description=UTC date and time in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.title=Authenticate via Google (Oauth) +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-analytics-data-api.credentials.title=Credentials +datasources.section.source-google-analytics-data-api.custom_reports.title=Custom Reports (Optional) +datasources.section.source-google-analytics-data-api.date_ranges_start_date.title=Date Range Start Date +datasources.section.source-google-analytics-data-api.property_id.title=Property ID +datasources.section.source-google-analytics-data-api.window_in_days.title=Data request time increment in days (Optional) +datasources.section.source-google-analytics-data-api.credentials.description=Credentials for the service +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization +datasources.section.source-google-analytics-data-api.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field. +datasources.section.source-google-analytics-data-api.date_ranges_start_date.description=The start date. One of the values Ndaysago, yesterday, today or in the format YYYY-MM-DD +datasources.section.source-google-analytics-data-api.property_id.description=A Google Analytics GA4 property identifier whose events are tracked. 
Specified in the URL path and not the body
+datasources.section.source-google-analytics-data-api.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364.
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.title=Access Token (Optional)
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.title=Authenticate via Google (Oauth)
+datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key
+datasources.section.source-google-analytics-v4.credentials.oneOf.1.title=Service Account Key Authentication
+datasources.section.source-google-analytics-v4.credentials.title=Credentials
+datasources.section.source-google-analytics-v4.custom_reports.title=Custom Reports (Optional)
+datasources.section.source-google-analytics-v4.start_date.title=Replication Start Date
+datasources.section.source-google-analytics-v4.view_id.title=View ID
+datasources.section.source-google-analytics-v4.window_in_days.title=Data request time increment in days (Optional)
+datasources.section.source-google-analytics-v4.credentials.description=Credentials for the service
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application.
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application.
+datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token.
+datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization
+datasources.section.source-google-analytics-v4.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field.
+datasources.section.source-google-analytics-v4.start_date.description=The date in the format YYYY-MM-DD. Any data before this date will not be replicated.
+datasources.section.source-google-analytics-v4.view_id.description=The ID for the Google Analytics View you want to fetch data from. This can be found from the Google Analytics Account Explorer.
+datasources.section.source-google-analytics-v4.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API.
More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-google-directory.credentials.oneOf.0.title=Sign in via Google (OAuth)
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.title=Credentials JSON
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.source-google-directory.credentials.oneOf.1.properties.email.title=Email
+datasources.section.source-google-directory.credentials.oneOf.1.title=Service Account Key
+datasources.section.source-google-directory.credentials.title=Google Credentials
+datasources.section.source-google-directory.credentials.description=Google APIs use the OAuth 2.0 protocol for authentication and authorization. The Source supports Web server application and Service accounts scenarios.
+datasources.section.source-google-directory.credentials.oneOf.0.description=For this scenario, the user only needs to give permission to read Google Directory data.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.description=The Client ID of the developer application.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the developer application.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.description=Authentication Scenario
+datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.description=The Token for obtaining a new access token.
+datasources.section.source-google-directory.credentials.oneOf.1.description=For this scenario, the user should obtain the service account's credentials from the Google API Console and provide a delegated email.
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key.
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.description=Authentication Scenario
+datasources.section.source-google-directory.credentials.oneOf.1.properties.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs.
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-google-search-console.authorization.oneOf.0.title=OAuth
+datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.title=Admin Email
+datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.title=Service Account JSON Key
+datasources.section.source-google-search-console.authorization.oneOf.1.title=Service Account Key Authentication
+datasources.section.source-google-search-console.authorization.title=Authentication Type
+datasources.section.source-google-search-console.end_date.title=End Date
+datasources.section.source-google-search-console.site_urls.title=Website URL Property
+datasources.section.source-google-search-console.start_date.title=Start Date
+datasources.section.source-google-search-console.authorization.description=
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.description=Access token for making authenticated requests. Read more here.
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.description=The client ID of your Google Search Console developer application. Read more here.
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.description=The client secret of your Google Search Console developer application. Read more here.
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. Read more here.
+datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs.
+datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.description=The JSON key of the service account to use for authorization. Read more here.
+datasources.section.source-google-search-console.end_date.description=UTC date in the format 2017-01-25. Any data after this date will not be replicated. Must be greater than or equal to the start date field.
+datasources.section.source-google-search-console.site_urls.description=The URLs of the website property attached to your GSC account. Read more here.
+datasources.section.source-google-search-console.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated.
+datasources.section.source-google-workspace-admin-reports.credentials_json.title=Credentials JSON
+datasources.section.source-google-workspace-admin-reports.email.title=Email
+datasources.section.source-google-workspace-admin-reports.lookback.title=Lookback Window in Days
+datasources.section.source-google-workspace-admin-reports.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key.
+datasources.section.source-google-workspace-admin-reports.email.description=The email of the user, who has permissions to access the Google Workspace Admin APIs. +datasources.section.source-google-workspace-admin-reports.lookback.description=Sets the range of time shown in the report. The maximum value allowed by the Google API is 180 days. +datasources.section.source-greenhouse.api_key.title=API Key +datasources.section.source-greenhouse.api_key.description=Greenhouse API Key. See the docs for more information on how to generate this key. +datasources.section.source-harvest.account_id.title=Account ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-harvest.credentials.oneOf.0.title=Authenticate via Harvest (OAuth) +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.title=Personal Access Token +datasources.section.source-harvest.credentials.oneOf.1.title=Authenticate with Personal Access Token +datasources.section.source-harvest.credentials.title=Authentication mechanism +datasources.section.source-harvest.replication_start_date.title=Start Date +datasources.section.source-harvest.account_id.description=Harvest account ID. Required for all Harvest requests in pair with Personal Access Token +datasources.section.source-harvest.credentials.description=Choose how to authenticate to Harvest. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.description=The Client ID of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.description=Log into Harvest and then create new personal access token. +datasources.section.source-harvest.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-hellobaton.api_key.description=authentication key required to access the api endpoints +datasources.section.source-hellobaton.company.description=Company name that generates your base api url +datasources.section.source-hubplanner.api_key.description=Hubplanner API key. See https://github.com/hubplanner/API#authentication for more details. +datasources.section.source-instagram.access_token.title=Access Token +datasources.section.source-instagram.start_date.title=Start Date +datasources.section.source-instagram.access_token.description=The value of the access token generated. See the docs for more information +datasources.section.source-instagram.start_date.description=The date from which you'd like to replicate data for User Insights, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-intercom.access_token.title=Access token +datasources.section.source-intercom.start_date.title=Start date +datasources.section.source-intercom.access_token.description=Access token for making authenticated requests. See the Intercom docs for more information. 
+datasources.section.source-intercom.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-iterable.api_key.title=API Key +datasources.section.source-iterable.start_date.title=Start Date +datasources.section.source-iterable.api_key.description=Iterable API Key. See the docs for more information on how to obtain this key. +datasources.section.source-iterable.start_date.description=The date from which you'd like to replicate data for Iterable, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-jdbc.jdbc_url.title=JDBC URL +datasources.section.source-jdbc.password.title=Password +datasources.section.source-jdbc.username.title=Username +datasources.section.source-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.source-jdbc.password.description=The password associated with this username. +datasources.section.source-jdbc.username.description=The username which is used to access the database. +datasources.section.source-jira.additional_fields.title=Additional Fields +datasources.section.source-jira.api_token.title=API Token +datasources.section.source-jira.domain.title=Domain +datasources.section.source-jira.email.title=Email +datasources.section.source-jira.enable_experimental_streams.title=Enable Experimental Streams +datasources.section.source-jira.expand_issue_changelog.title=Expand Issue Changelog +datasources.section.source-jira.projects.title=Projects +datasources.section.source-jira.render_fields.title=Render Issue Fields +datasources.section.source-jira.start_date.title=Start Date +datasources.section.source-jira.additional_fields.description=List of additional fields to include in replicating issues. +datasources.section.source-jira.api_token.description=Jira API Token. See the docs for more information on how to generate this key. +datasources.section.source-jira.domain.description=The Domain for your Jira account, e.g. airbyteio.atlassian.net +datasources.section.source-jira.email.description=The user email for your Jira account. +datasources.section.source-jira.enable_experimental_streams.description=Allow the use of experimental streams which rely on undocumented Jira API endpoints. See https://docs.airbyte.io/integrations/sources/jira#experimental-tables for more info. +datasources.section.source-jira.expand_issue_changelog.description=Expand the changelog when replicating issues. +datasources.section.source-jira.projects.description=List of Jira project keys to replicate data for. +datasources.section.source-jira.render_fields.description=Render issue fields in HTML format in addition to Jira JSON-like format. +datasources.section.source-jira.start_date.description=The date from which you'd like to replicate data for Jira in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only in the following incremental streams: issues. 
+datasources.section.source-kafka.auto_commit_interval_ms.title=Auto Commit Interval, ms +datasources.section.source-kafka.auto_offset_reset.title=Auto Offset Reset +datasources.section.source-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.source-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.source-kafka.client_id.title=Client ID +datasources.section.source-kafka.enable_auto_commit.title=Enable Auto Commit +datasources.section.source-kafka.group_id.title=Group ID +datasources.section.source-kafka.max_poll_records.title=Max Poll Records +datasources.section.source-kafka.polling_time.title=Polling Time +datasources.section.source-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.source-kafka.protocol.title=Protocol +datasources.section.source-kafka.receive_buffer_bytes.title=Receive Buffer, bytes +datasources.section.source-kafka.repeated_calls.title=Repeated Calls +datasources.section.source-kafka.request_timeout_ms.title=Request Timeout, ms +datasources.section.source-kafka.retry_backoff_ms.title=Retry Backoff, ms +datasources.section.source-kafka.subscription.oneOf.0.properties.topic_partitions.title=List of topic:partition Pairs +datasources.section.source-kafka.subscription.oneOf.0.title=Manually assign a list of partitions +datasources.section.source-kafka.subscription.oneOf.1.properties.topic_pattern.title=Topic Pattern +datasources.section.source-kafka.subscription.oneOf.1.title=Subscribe to all topics matching specified pattern +datasources.section.source-kafka.subscription.title=Subscription Method +datasources.section.source-kafka.test_topic.title=Test Topic +datasources.section.source-kafka.auto_commit_interval_ms.description=The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true. +datasources.section.source-kafka.auto_offset_reset.description=What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer. +datasources.section.source-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background. +datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups. +datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note, that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and returns them incrementally from each poll. +datasources.section.source-kafka.polling_time.description=Amount of time Kafka connector should try to poll for messages. +datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received. +datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios. 
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions. +datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one). +datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read. +datasources.section.source-kafka.test_topic.description=The Topic to test whether Airbyte can consume messages. +datasources.section.source-klaviyo.api_key.title=API Key +datasources.section.source-klaviyo.start_date.title=Start Date +datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key. +datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-kustomer-singer.api_token.title=API Token +datasources.section.source-kustomer-singer.start_date.title=Start Date +datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this. +datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data. +datasources.section.source-kyriba.domain.title=Domain +datasources.section.source-kyriba.end_date.title=End Date +datasources.section.source-kyriba.password.title=Password +datasources.section.source-kyriba.start_date.title=Start Date +datasources.section.source-kyriba.username.title=Username +datasources.section.source-kyriba.domain.description=Kyriba domain +datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run up to the current date. +datasources.section.source-kyriba.password.description=Password to be used in basic auth +datasources.section.source-kyriba.start_date.description=The date the sync should start from. +datasources.section.source-kyriba.username.description=Username to be used in basic auth +datasources.section.source-lemlist.api_key.title=API key +datasources.section.source-lemlist.api_key.description=Lemlist API key. +datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional) +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token +datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token +datasources.section.source-linkedin-ads.credentials.title=Authentication * +datasources.section.source-linkedin-ads.start_date.title=Start date +datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs, separated by a space, to pull the data from. Leave empty if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info. 
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours. +datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token +datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token +datasources.section.source-linkedin-pages.credentials.title=Authentication * +datasources.section.source-linkedin-pages.org_id.title=Organization ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID. +datasources.section.source-linnworks.application_id.title=Application ID +datasources.section.source-linnworks.application_secret.title=Application Secret +datasources.section.source-linnworks.start_date.title=Start Date +datasources.section.source-linnworks.token.title=API Token +datasources.section.source-linnworks.application_id.description=Linnworks Application ID +datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret +datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-looker.client_id.title=Client ID +datasources.section.source-looker.client_secret.title=Client Secret +datasources.section.source-looker.domain.title=Domain +datasources.section.source-looker.run_look_ids.title=Look IDs to Run +datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key. +datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key. 
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com,looker.[clientname].com,IP address +datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional) +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key +datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key +datasources.section.source-mailchimp.credentials.title=Authentication * +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key. +datasources.section.source-mailgun.domain_region.title=Domain Region Code +datasources.section.source-mailgun.private_key.title=Private API Key +datasources.section.source-mailgun.start_date.title=Replication Start Date +datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'. +datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data. +datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago. +datasources.section.source-marketo.client_id.title=Client ID +datasources.section.source-marketo.client_secret.title=Client Secret +datasources.section.source-marketo.domain_url.title=Domain URL +datasources.section.source-marketo.start_date.title=Start Date +datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this. +datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID +datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0) +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID +datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft +datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism +datasources.section.source-microsoft-teams.period.title=Period +datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL +datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180. 
+datasources.section.source-mixpanel.api_secret.title=Project Secret +datasources.section.source-mixpanel.attribution_window.title=Attribution Window +datasources.section.source-mixpanel.date_window_size.title=Date slicing window +datasources.section.source-mixpanel.end_date.title=End Date +datasources.section.source-mixpanel.project_timezone.title=Project Timezone +datasources.section.source-mixpanel.region.title=Region +datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default +datasources.section.source-mixpanel.start_date.title=Start Date +datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this. +datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days. +datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days that is used to slice through data. You can reduce it if the amount of data in each window is too big for your environment. +datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date. +datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console. +datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU. +datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise, new properties will be ignored. +datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default. +datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional) +datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-monday.credentials.oneOf.1.title=API Token +datasources.section.source-monday.credentials.title=Authorization Method +datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. 
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests. +datasources.section.source-mongodb.auth_source.title=Authentication source +datasources.section.source-mongodb.database.title=Database name +datasources.section.source-mongodb.host.title=Host +datasources.section.source-mongodb.password.title=Password +datasources.section.source-mongodb.port.title=Port +datasources.section.source-mongodb.replica_set.title=Replica Set +datasources.section.source-mongodb.ssl.title=TLS connection +datasources.section.source-mongodb.user.title=User +datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info. +datasources.section.source-mongodb.database.description=Database to be replicated. +datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated. +datasources.section.source-mongodb.password.description=Password +datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated. +datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info. +datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB. +datasources.section.source-mongodb.user.description=User +datasources.section.source-mongodb-v2.auth_source.title=Authentication Source +datasources.section.source-mongodb-v2.database.title=Database Name +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses +datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type +datasources.section.source-mongodb-v2.password.title=Password +datasources.section.source-mongodb-v2.user.title=User +datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored. +datasources.section.source-mongodb-v2.database.description=The database you want to replicate. +datasources.section.source-mongodb-v2.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database. 
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by comma. +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to. +datasources.section.source-mongodb-v2.password.description=The password associated with this username. +datasources.section.source-mongodb-v2.user.description=The username which is used to access the database. +datasources.section.source-mssql.database.title=Database +datasources.section.source-mssql.host.title=Host +datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mssql.password.title=Password +datasources.section.source-mssql.port.title=Port +datasources.section.source-mssql.replication_method.oneOf.0.title=Standard +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level +datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mssql.replication_method.title=Replication Method +datasources.section.source-mssql.schemas.title=Schemas +datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.source-mssql.ssl_method.title=SSL Method +datasources.section.source-mssql.username.title=Username +datasources.section.source-mssql.database.description=The name of the database. +datasources.section.source-mssql.host.description=The hostname of the database. +datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mssql.password.description=The password associated with the username. +datasources.section.source-mssql.port.description=The port of the database. +datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC. +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database. +datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database. +datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.source-mssql.username.description=The username which is used to access the database. 
+datasources.section.source-my-hours.email.title=Email +datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size +datasources.section.source-my-hours.password.title=Password +datasources.section.source-my-hours.start_date.title=Start Date +datasources.section.source-my-hours.email.description=Your My Hours username +datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days +datasources.section.source-my-hours.password.description=The password associated to the username +datasources.section.source-my-hours.start_date.description=Start date for collecting time logs +datasources.section.source-mysql.database.title=Database +datasources.section.source-mysql.host.title=Host +datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mysql.password.title=Password +datasources.section.source-mysql.port.title=Port +datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD +datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mysql.replication_method.title=Replication Method +datasources.section.source-mysql.ssl.title=SSL Connection +datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred +datasources.section.source-mysql.ssl_mode.oneOf.1.title=required +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity +datasources.section.source-mysql.ssl_mode.title=SSL modes +datasources.section.source-mysql.username.title=Username +datasources.section.source-mysql.database.description=The database name. +datasources.section.source-mysql.host.description=The host name of the database. +datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mysql.password.description=The password associated with the username. +datasources.section.source-mysql.port.description=The port to connect to. +datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database. +datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL. +datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
  • preferred - Automatically attempt SSL connection. If the MySQL server does not support SSL, continue with a regular connection.
  • required - Always connect with SSL. If the MySQL server doesn’t support SSL, the connection will not be established. Certificate Authority (CA) and Hostname are not verified.
  • verify-ca - Always connect with SSL. Verifies CA, but allows connection even if Hostname does not match.
  • Verify Identity - Always connect with SSL. Verify both CA and Hostname.
  • Read more in the docs. +datasources.section.source-mysql.ssl_mode.oneOf.0.description=Preferred SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.1.description=Require SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.description=Verify CA SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.description=Password for key storage. This field is optional. If you do not add it, the password will be generated automatically. +datasources.section.source-mysql.ssl_mode.oneOf.3.description=Verify-full SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.description=Password for key storage. This field is optional. If you do not add it, the password will be generated automatically. +datasources.section.source-mysql.username.description=The username which is used to access the database. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-notion.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-notion.credentials.oneOf.1.properties.token.title=Access Token +datasources.section.source-notion.credentials.oneOf.1.title=Access Token +datasources.section.source-notion.credentials.title=Authenticate using +datasources.section.source-notion.start_date.title=Start Date +datasources.section.source-notion.credentials.description=Pick an authentication method. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.description=Access Token is a token you received by completing the OAuth web flow of Notion. +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.description=The Client ID of your Notion integration. +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Notion integration. +datasources.section.source-notion.credentials.oneOf.1.properties.token.description=Notion API access token, see the docs for more information on how to obtain this token. +datasources.section.source-notion.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-okta.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-okta.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-okta.credentials.oneOf.1.title=API Token +datasources.section.source-okta.credentials.title=Authorization Method * +datasources.section.source-okta.domain.title=Okta domain +datasources.section.source-okta.start_date.title=Start Date +datasources.section.source-okta.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain new Access Token, when it's expired. +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.description=An Okta token. See the docs for instructions on how to generate it. +datasources.section.source-okta.domain.description=The Okta domain. See the docs for instructions on how to find it. +datasources.section.source-okta.start_date.description=UTC date and time in the format YYYY-MM-DDTHH:MM:SSZ. Any data before this date will not be replicated. +datasources.section.source-onesignal.outcome_names.title=Outcome Names +datasources.section.source-onesignal.start_date.title=Start Date +datasources.section.source-onesignal.user_auth_key.title=User Auth Key +datasources.section.source-onesignal.outcome_names.description=Comma-separated list of names and the value (sum/count) for the returned outcome data. See the docs for more details +datasources.section.source-onesignal.start_date.description=The date from which you'd like to replicate data for OneSignal API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-onesignal.user_auth_key.description=OneSignal User Auth Key, see the docs for more information on how to obtain this key. +datasources.section.source-openweather.appid.title=App ID +datasources.section.source-openweather.lang.title=Language +datasources.section.source-openweather.lat.title=Latitude +datasources.section.source-openweather.lon.title=Longitude +datasources.section.source-openweather.units.title=Units +datasources.section.source-openweather.appid.description=Your OpenWeather API Key. See here. The key is case sensitive. +datasources.section.source-openweather.lang.description=You can use lang parameter to get the output in your language. The contents of the description field will be translated. See here for the list of supported languages. +datasources.section.source-openweather.lat.description=Latitude for which you want to get weather condition from. (min -90, max 90) +datasources.section.source-openweather.lon.description=Longitude for which you want to get weather condition from. (min -180, max 180) +datasources.section.source-openweather.units.description=Units of measurement. standard, metric and imperial units are available. If you do not use the units parameter, standard units will be applied by default. 
+datasources.section.source-oracle.connection_data.oneOf.0.properties.service_name.title=Service name +datasources.section.source-oracle.connection_data.oneOf.0.title=Service name +datasources.section.source-oracle.connection_data.oneOf.1.properties.sid.title=System ID (SID) +datasources.section.source-oracle.connection_data.oneOf.1.title=System ID (SID) +datasources.section.source-oracle.connection_data.title=Connect by +datasources.section.source-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.source-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM File +datasources.section.source-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.source-oracle.encryption.title=Encryption +datasources.section.source-oracle.host.title=Host +datasources.section.source-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.source-oracle.password.title=Password +datasources.section.source-oracle.port.title=Port +datasources.section.source-oracle.schemas.title=Schemas +datasources.section.source-oracle.username.title=User +datasources.section.source-oracle.connection_data.description=Connection data that will be used for the DB connection +datasources.section.source-oracle.connection_data.oneOf.0.description=Use service name +datasources.section.source-oracle.connection_data.oneOf.1.description=Use SID (Oracle System Identifier) +datasources.section.source-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.source-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines what encryption algorithm is used. +datasources.section.source-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.source-oracle.host.description=Hostname of the database. +datasources.section.source-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-oracle.password.description=The password associated with the username. +datasources.section.source-oracle.port.description=Port of the database. +datasources.section.source-oracle.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-oracle.username.description=The username which is used to access the database. 
+datasources.section.source-orb.api_key.title=Orb API Key +datasources.section.source-orb.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-orb.numeric_event_properties_keys.title=Event properties keys (numeric values) +datasources.section.source-orb.start_date.title=Start Date +datasources.section.source-orb.string_event_properties_keys.title=Event properties keys (string values) +datasources.section.source-orb.api_key.description=Orb API Key, issued from the Orb admin console. +datasources.section.source-orb.lookback_window_days.description=When set to N, the connector will always refresh resources created within the past N days. By default, updated objects that are not newly created are not incrementally synced. +datasources.section.source-orb.numeric_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-orb.start_date.description=UTC date and time in the format 2022-03-01T00:00:00Z. Any data with created_at before this date will not be synced. +datasources.section.source-orb.string_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-outreach.client_id.title=Client ID +datasources.section.source-outreach.client_secret.title=Client Secret +datasources.section.source-outreach.redirect_uri.title=Redirect URI +datasources.section.source-outreach.refresh_token.title=Refresh Token +datasources.section.source-outreach.start_date.title=Start Date +datasources.section.source-outreach.client_id.description=The Client ID of your Outreach developer application. +datasources.section.source-outreach.client_secret.description=The Client Secret of your Outreach developer application. +datasources.section.source-outreach.redirect_uri.description=A Redirect URI is the location where the authorization server sends the user once the app has been successfully authorized and granted an authorization code or access token. +datasources.section.source-outreach.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-outreach.start_date.description=The date from which you'd like to replicate data for Outreach API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-pardot.client_id.description=The Consumer Key that can be found when viewing your app in Salesforce +datasources.section.source-pardot.client_secret.description=The Consumer Secret that can be found when viewing your app in Salesforce +datasources.section.source-pardot.is_sandbox.description=Whether or not the app is in a Salesforce sandbox. If you do not know what this is, assume it is false. +datasources.section.source-pardot.pardot_business_unit_id.description=Pardot Business ID, can be found at Setup > Pardot > Pardot Account Setup +datasources.section.source-pardot.refresh_token.description=Salesforce Refresh Token used for Airbyte to access your Salesforce account. If you don't know what this is, follow this guide to retrieve it. +datasources.section.source-pardot.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
Leave blank to skip this filter +datasources.section.source-paypal-transaction.client_id.title=Client ID +datasources.section.source-paypal-transaction.client_secret.title=Client secret +datasources.section.source-paypal-transaction.is_sandbox.title=Sandbox +datasources.section.source-paypal-transaction.refresh_token.title=Refresh token (Optional) +datasources.section.source-paypal-transaction.start_date.title=Start Date +datasources.section.source-paypal-transaction.client_id.description=The Client ID of your Paypal developer application. +datasources.section.source-paypal-transaction.client_secret.description=The Client Secret of your Paypal developer application. +datasources.section.source-paypal-transaction.is_sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-paypal-transaction.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-paypal-transaction.start_date.description=Start Date for data extraction in ISO format. Date must be in range from 3 years till 12 hrs before present time. +datasources.section.source-paystack.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-paystack.secret_key.title=Secret Key +datasources.section.source-paystack.start_date.title=Start Date +datasources.section.source-paystack.lookback_window_days.description=When set, the connector will always reload data from the past N days, where N is the value set here. This is useful if your data is updated after creation. +datasources.section.source-paystack.secret_key.description=The Paystack API key (usually starts with 'sk_live_'; find yours here). +datasources.section.source-paystack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-persistiq.api_key.description=PersistIq API Key. See the docs for more information on where to find that key. +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pinterest.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-pinterest.credentials.oneOf.1.title=Access Token +datasources.section.source-pinterest.credentials.title=Authorization Method +datasources.section.source-pinterest.start_date.title=Start Date +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain new Access Token, when it's expired. +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests. +datasources.section.source-pinterest.start_date.description=A date in the format YYYY-MM-DD. If you have not set a date, it would be defaulted to 2020-07-28. 
+datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pipedrive.authorization.oneOf.0.title=Sign in via Pipedrive (OAuth) +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.title=API Token +datasources.section.source-pipedrive.authorization.oneOf.1.title=API Key Authentication +datasources.section.source-pipedrive.authorization.title=Authentication Type +datasources.section.source-pipedrive.replication_start_date.title=Start Date +datasources.section.source-pipedrive.authorization.description=Choose one of the possible authorization methods +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.description=The Client ID of your Pipedrive developer application. +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.description=The Client Secret of your Pipedrive developer application. +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.description=The Pipedrive API Token. +datasources.section.source-pipedrive.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. When specified and not None, the stream will behave as incremental. +datasources.section.source-pivotal-tracker.api_token.description=Pivotal Tracker API token +datasources.section.source-plaid.access_token.title=Access Token +datasources.section.source-plaid.api_key.title=API Key +datasources.section.source-plaid.client_id.title=Client ID +datasources.section.source-plaid.plaid_env.title=Plaid Environment +datasources.section.source-plaid.start_date.title=Start Date +datasources.section.source-plaid.access_token.description=The end-user's Link access token. +datasources.section.source-plaid.api_key.description=The Plaid API key to use to hit the API. +datasources.section.source-plaid.client_id.description=The Plaid client ID. +datasources.section.source-plaid.plaid_env.description=The Plaid environment. +datasources.section.source-plaid.start_date.description=The date from which you'd like to replicate data for Plaid in the format YYYY-MM-DD. All data generated after this date will be replicated. +datasources.section.source-pokeapi.pokemon_name.title=Pokemon Name +datasources.section.source-pokeapi.pokemon_name.description=Pokemon requested from the API. 
+datasources.section.source-postgres.database.title=Database Name +datasources.section.source-postgres.host.title=Host +datasources.section.source-postgres.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-postgres.password.title=Password +datasources.section.source-postgres.port.title=Port +datasources.section.source-postgres.replication_method.oneOf.0.title=Standard +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.title=Initial Waiting Time in Seconds (Advanced) +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.title=Plugin +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.title=Publication +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.title=Replication Slot +datasources.section.source-postgres.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-postgres.replication_method.title=Replication Method +datasources.section.source-postgres.schemas.title=Schemas +datasources.section.source-postgres.ssl.title=Connect using SSL +datasources.section.source-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.source-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.source-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.source-postgres.ssl_mode.oneOf.3.title=require +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA Certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.source-postgres.ssl_mode.title=SSL Modes +datasources.section.source-postgres.username.title=Username +datasources.section.source-postgres.database.description=Name of the database. +datasources.section.source-postgres.host.description=Hostname of the database. +datasources.section.source-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-postgres.password.description=Password associated with the username. +datasources.section.source-postgres.port.description=Port of the database. +datasources.section.source-postgres.replication_method.description=Replication method for extracting data from the database. +datasources.section.source-postgres.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. 
+datasources.section.source-postgres.replication_method.oneOf.1.description=Logical replication uses the Postgres write-ahead log (WAL) to detect inserts, updates, and deletes. This needs to be configured on the source database itself. Only available on Postgres 10 and above. Read the docs. +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.description=The amount of time the connector will wait when it launches to determine if there is new data to sync or not. Defaults to 300 seconds. Valid range: 120 seconds to 1200 seconds. Read about initial waiting time. +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.description=A logical decoding plugin installed on the PostgreSQL server. The `pgoutput` plugin is used by default. If the replication table contains a lot of big jsonb values, it is recommended to use the `wal2json` plugin. Read more about selecting replication plugins. +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.description=A Postgres publication used for consuming changes. Read about publications and replication identities. +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.description=A plugin logical replication slot. Read about replication slots. +datasources.section.source-postgres.schemas.description=The list of schemas (case sensitive) to sync from. Defaults to public. +datasources.section.source-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.source-postgres.ssl_mode.description=SSL connection modes. +datasources.section.source-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.source-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for key storage. If you do not add it, the password will be generated automatically. +datasources.section.source-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for key storage. If you do not add it, the password will be generated automatically. +datasources.section.source-postgres.username.description=Username to access the database. +datasources.section.source-posthog.api_key.title=API Key +datasources.section.source-posthog.base_url.title=Base URL +datasources.section.source-posthog.start_date.title=Start Date +datasources.section.source-posthog.api_key.description=API Key. 
See the docs for information on how to generate this key. +datasources.section.source-posthog.base_url.description=Base PostHog url. Defaults to PostHog Cloud (https://app.posthog.com). +datasources.section.source-posthog.start_date.description=The date from which you'd like to replicate the data. Any data before this date will not be replicated. +datasources.section.source-prestashop.access_key.description=Your PrestaShop access key. See the docs for info on how to obtain this. +datasources.section.source-prestashop.url.description=Shop URL without trailing slash (domain name or IP address) +datasources.section.source-qualaroo.key.title=API key +datasources.section.source-qualaroo.start_date.title=Start Date +datasources.section.source-qualaroo.survey_ids.title=Qualaroo survey IDs +datasources.section.source-qualaroo.token.title=API token +datasources.section.source-qualaroo.key.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-qualaroo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-qualaroo.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated. +datasources.section.source-qualaroo.token.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-quickbooks-singer.client_id.title=Client ID +datasources.section.source-quickbooks-singer.client_secret.title=Client Secret +datasources.section.source-quickbooks-singer.realm_id.title=Realm ID +datasources.section.source-quickbooks-singer.refresh_token.title=Refresh Token +datasources.section.source-quickbooks-singer.sandbox.title=Sandbox +datasources.section.source-quickbooks-singer.start_date.title=Start Date +datasources.section.source-quickbooks-singer.user_agent.title=User Agent +datasources.section.source-quickbooks-singer.client_id.description=Identifies which app is making the request. Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.client_secret.description= Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.realm_id.description=Labeled Company ID. The Make API Calls panel is populated with the realm id and the current access token. +datasources.section.source-quickbooks-singer.refresh_token.description=A token used when refreshing the access token. +datasources.section.source-quickbooks-singer.sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-quickbooks-singer.start_date.description=The default value to use if no bookmark exists for an endpoint (rfc3339 date string). E.g, 2021-03-20T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-quickbooks-singer.user_agent.description=Process and email for API logging purposes. Example: tap-quickbooks . +datasources.section.source-recharge.access_token.title=Access Token +datasources.section.source-recharge.start_date.title=Start Date +datasources.section.source-recharge.access_token.description=The value of the Access Token generated. See the docs for more information. 
+datasources.section.source-recharge.start_date.description=The date from which you'd like to replicate data for Recharge API, in the format YYYY-MM-DDT00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-recurly.api_key.title=API Key +datasources.section.source-recurly.api_key.description=Recurly API Key. See the docs for more information on how to generate this key. +datasources.section.source-recurly.begin_time.description=ISO8601 timestamp from which the replication from Recurly API will start from. +datasources.section.source-recurly.end_time.description=ISO8601 timestamp to which the replication from Recurly API will stop. Records after that date won't be imported. +datasources.section.source-redshift.database.title=Database +datasources.section.source-redshift.host.title=Host +datasources.section.source-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.source-redshift.password.title=Password +datasources.section.source-redshift.port.title=Port +datasources.section.source-redshift.schemas.title=Schemas +datasources.section.source-redshift.username.title=Username +datasources.section.source-redshift.database.description=Name of the database. +datasources.section.source-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com). +datasources.section.source-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-redshift.password.description=Password associated with the username. +datasources.section.source-redshift.port.description=Port of the database. +datasources.section.source-redshift.schemas.description=The list of schemas to sync from. Specify one or more explicitly or keep empty to process all schemas. Schema names are case sensitive. +datasources.section.source-redshift.username.description=Username to use to access the database. +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-retently.credentials.oneOf.0.title=Authenticate via Retently (OAuth) +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.title=API Token +datasources.section.source-retently.credentials.oneOf.1.title=Authenticate with API Token +datasources.section.source-retently.credentials.title=Authentication Mechanism +datasources.section.source-retently.credentials.description=Choose how to authenticate to Retently +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.description=The Client ID of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.description=Retently Refresh Token which can be used to fetch new Bearer Tokens when the current one expires. +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.description=Retently API Token. See the docs for more information on how to obtain this key. 
+datasources.section.source-rki-covid.start_date.title=Start Date +datasources.section.source-rki-covid.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-s3.dataset.title=Output Stream Name +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.title=Additional Reader Options +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.title=Advanced Options +datasources.section.source-s3.format.oneOf.0.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.0.properties.delimiter.title=Delimiter +datasources.section.source-s3.format.oneOf.0.properties.double_quote.title=Double Quote +datasources.section.source-s3.format.oneOf.0.properties.encoding.title=Encoding +datasources.section.source-s3.format.oneOf.0.properties.escape_char.title=Escape Character +datasources.section.source-s3.format.oneOf.0.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.title=Infer Datatypes +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.0.properties.quote_char.title=Quote Character +datasources.section.source-s3.format.oneOf.0.title=CSV +datasources.section.source-s3.format.oneOf.1.properties.batch_size.title=Record batch size +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.title=Buffer Size +datasources.section.source-s3.format.oneOf.1.properties.columns.title=Selected Columns +datasources.section.source-s3.format.oneOf.1.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.1.title=Parquet +datasources.section.source-s3.format.oneOf.2.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.2.title=Avro +datasources.section.source-s3.format.oneOf.3.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.3.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.title=UnexpectedFieldBehaviorEnum +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.title=Unexpected field behavior +datasources.section.source-s3.format.oneOf.3.title=Jsonl +datasources.section.source-s3.format.title=File Format +datasources.section.source-s3.path_pattern.title=Pattern of files to replicate +datasources.section.source-s3.provider.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-s3.provider.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-s3.provider.properties.bucket.title=Bucket +datasources.section.source-s3.provider.properties.endpoint.title=Endpoint +datasources.section.source-s3.provider.properties.path_prefix.title=Path Prefix +datasources.section.source-s3.provider.properties.use_ssl.title=Use TLS +datasources.section.source-s3.provider.properties.verify_ssl_cert.title=Verify TLS Certificates +datasources.section.source-s3.provider.title=S3: Amazon Web Services +datasources.section.source-s3.schema.title=Manually enforced data schema (Optional) +datasources.section.source-s3.dataset.description=The name of the stream you would like this source to output. Can contain letters, numbers, or underscores. 
+datasources.section.source-s3.format.description=The format of the files you'd like to replicate +datasources.section.source-s3.format.oneOf.0.description=This connector utilises PyArrow (Apache Arrow) for CSV parsing. +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.description=Optionally add a valid JSON string here to provide additional options to the csv reader. Mappings must correspond to options detailed here. 'column_types' is used internally to handle schema so overriding that would likely cause problems. +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.description=Optionally add a valid JSON string here to provide additional Pyarrow ReadOptions. Specify 'column_names' here if your CSV doesn't have header, or if you want to use custom column names. 'block_size' and 'encoding' are already used above, specify them again here will override the values above. +datasources.section.source-s3.format.oneOf.0.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.0.properties.delimiter.description=The character delimiting individual cells in the CSV data. This may only be a 1-character string. For tab-delimited data enter '\t'. +datasources.section.source-s3.format.oneOf.0.properties.double_quote.description=Whether two quotes in a quoted CSV value denote a single quote in the data. +datasources.section.source-s3.format.oneOf.0.properties.encoding.description=The character encoding of the CSV data. Leave blank to default to UTF8. See list of python encodings for allowable options. +datasources.section.source-s3.format.oneOf.0.properties.escape_char.description=The character used for escaping special characters. To disallow escaping, leave this field blank. +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.description=Configures whether a schema for the source should be inferred from the current data or not. If set to false and a custom schema is set, then the manually enforced schema is used. If a schema is not manually set, and this is set to false, then all fields will be read as strings +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.description=Whether newline characters are allowed in CSV values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.0.properties.quote_char.description=The character used for quoting CSV values. To disallow quoting, make this field blank. +datasources.section.source-s3.format.oneOf.1.description=This connector utilises PyArrow (Apache Arrow) for Parquet parsing. +datasources.section.source-s3.format.oneOf.1.properties.batch_size.description=Maximum number of records per batch read from the input files. Batches may be smaller if there aren’t enough rows in the file. This option can help avoid out-of-memory errors if your data is particularly wide. +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.description=Perform read buffering when deserializing individual column chunks. By default every group column will be loaded fully to memory. This option can help avoid out-of-memory errors if your data is particularly wide. 
+datasources.section.source-s3.format.oneOf.1.properties.columns.description=If you only want to sync a subset of the columns from the file(s), add the columns you want here as a comma-delimited list. Leave it empty to sync all columns. +datasources.section.source-s3.format.oneOf.2.description=This connector utilises fastavro for Avro parsing. +datasources.section.source-s3.format.oneOf.3.description=This connector uses PyArrow for JSON Lines (jsonl) file parsing. +datasources.section.source-s3.format.oneOf.3.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.description=Whether newline characters are allowed in JSON values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.description=An enumeration. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.description=How JSON fields outside of explicit_schema (if given) are treated. Check PyArrow documentation for details +datasources.section.source-s3.path_pattern.description=A regular expression which tells the connector which files to replicate. All files which match this pattern will be replicated. Use | to separate multiple patterns. See this page to understand pattern syntax (GLOBSTAR and SPLIT flags are enabled). Use pattern ** to pick up all files. +datasources.section.source-s3.provider.description=Use this to load files from S3 or S3-compatible services +datasources.section.source-s3.provider.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.bucket.description=Name of the S3 bucket where the file(s) exist. +datasources.section.source-s3.provider.properties.endpoint.description=Endpoint to an S3 compatible service. Leave empty to use AWS. +datasources.section.source-s3.provider.properties.path_prefix.description=By providing a path-like prefix (e.g. myFolder/thisTable/) under which all the relevant files sit, we can optimize finding these in S3. This is optional but recommended if your bucket contains many folders/files which you don't need to replicate. +datasources.section.source-s3.provider.properties.use_ssl.description=Whether the remote server is using a secure SSL/TLS connection. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.provider.properties.verify_ssl_cert.description=Set this to false to allow self signed certificates. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.schema.description=Optionally provide a schema to enforce, as a valid JSON string. Ensure this is a mapping of { "column" : "type" }, where types are valid JSON Schema datatypes. Leave as {} to auto-infer the schema. 
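Taken together, the source-s3 keys above cover the output stream name, the file-matching pattern, the per-format parsing options, the S3 provider settings, and an optional manually enforced schema. The sketch below pulls those field names into one hypothetical configuration; the nesting and all concrete values are illustrative assumptions, and only the key names and value formats come from the descriptions above.

```
# Hypothetical source-s3 configuration sketch built from the field names above.
# The nesting and all concrete values are illustrative assumptions.
source_s3_config = {
    "dataset": "orders",                        # Output Stream Name: letters, numbers, underscores
    "path_pattern": "**/*.csv|reports/*.csv",   # several patterns separated by |, ** matches all files
    "format": {
        "filetype": "csv",
        "delimiter": ",",                       # single character; use '\t' for tab-delimited data
        "encoding": "utf8",                     # blank defaults to UTF8
        "infer_datatypes": True,                # False with no schema means all fields read as strings
    },
    "provider": {
        "bucket": "my-example-bucket",          # hypothetical bucket name
        "path_prefix": "myFolder/thisTable/",   # optional prefix to narrow the S3 listing
        "endpoint": "",                         # leave empty to use AWS itself
    },
    # Manually enforced schema: a JSON string mapping column -> JSON Schema type,
    # or "{}" to auto-infer.
    "schema": '{"id": "integer", "name": "string"}',
}
```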
+datasources.section.source-salesloft.client_id.title=Client ID +datasources.section.source-salesloft.client_secret.title=Client Secret +datasources.section.source-salesloft.refresh_token.title=Refresh Token +datasources.section.source-salesloft.start_date.title=Start Date +datasources.section.source-salesloft.client_id.description=The Client ID of your Salesloft developer application. +datasources.section.source-salesloft.client_secret.description=The Client Secret of your Salesloft developer application. +datasources.section.source-salesloft.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-salesloft.start_date.description=The date from which you'd like to replicate data for Salesloft API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-search-metrics.api_key.title=API Key +datasources.section.source-search-metrics.client_secret.title=Client Secret +datasources.section.source-search-metrics.country_code.title=Country Code +datasources.section.source-search-metrics.start_date.title=Start Date +datasources.section.source-search-metrics.api_key.description= +datasources.section.source-search-metrics.client_secret.description= +datasources.section.source-search-metrics.country_code.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.source-search-metrics.start_date.description=Data generated in SearchMetrics after this date will be replicated. This date must be specified in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-sendgrid.apikey.title=Sendgrid API key +datasources.section.source-sendgrid.start_time.title=Start time +datasources.section.source-sendgrid.apikey.description=API Key, use admin to generate this key. +datasources.section.source-sendgrid.start_time.description=Start time in timestamp integer format. Any data before this timestamp will not be replicated. +datasources.section.source-sentry.auth_token.title=Authentication Tokens +datasources.section.source-sentry.hostname.title=Host Name +datasources.section.source-sentry.organization.title=Organization +datasources.section.source-sentry.project.title=Project +datasources.section.source-sentry.auth_token.description=Log into Sentry and then create authentication tokens. For self-hosted, you can find or create authentication tokens by visiting "{instance_url_prefix}/settings/account/api/auth-tokens/" +datasources.section.source-sentry.hostname.description=Host name of Sentry API server. For self-hosted, specify your host name here. Otherwise, leave it empty. +datasources.section.source-sentry.organization.description=The slug of the organization the groups belong to. +datasources.section.source-sentry.project.description=The name (slug) of the Project you want to sync. 
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. +datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the Microsoft native endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
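As a quick illustration of how the destination-azure-blob-storage keys above fit together, here is a hypothetical configuration fragment. The field names are taken from the property keys; the nesting, the format discriminator, and all concrete values are assumptions.

```
# Hypothetical destination-azure-blob-storage sketch using the field names above.
# The "format_type" discriminator and all concrete values are illustrative assumptions.
destination_azure_blob_config = {
    "azure_blob_storage_account_name": "myaccount",       # storage account name
    "azure_blob_storage_account_key": "<account key>",    # placeholder secret
    "azure_blob_storage_container_name": "airbyte-sync",  # created automatically if missing
    "azure_blob_storage_endpoint_domain_name": "",        # empty -> Microsoft native endpoint
    "azure_blob_storage_output_buffer_size": 5,           # megabytes buffered per output stream
    "format": {
        "format_type": "CSV",                             # the other option is JSON Lines
        "flattening": "Root level flattening",            # assumed label for normalization
    },
}
```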
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
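The loading_method keys above describe a choice between plain Standard Inserts and GCS Staging backed by an HMAC credential. Below is a minimal sketch of the GCS Staging variant; only the key names come from the properties above, while the nesting, the discriminator values, and all placeholder secrets are assumptions.

```
# Hypothetical destination-bigquery sketch for the GCS Staging loading method.
# Discriminator values ("GCS Staging", "HMAC_KEY") and all secrets are assumptions.
destination_bigquery_config = {
    "project_id": "my-gcp-project",            # GCP project containing the target dataset
    "dataset_id": "airbyte_raw",               # default dataset when the source has no namespace
    "dataset_location": "US",
    "loading_method": {
        "method": "GCS Staging",               # alternative: Standard Inserts (quick testing only)
        "gcs_bucket_name": "my-staging-bucket",
        "gcs_bucket_path": "staging/airbyte",
        "credential": {
            "credential_type": "HMAC_KEY",
            "hmac_key_access_id": "<61-character service-account access ID>",
            "hmac_key_secret": "<40-character base64-encoded secret>",
        },
    },
}
```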
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS once the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the elasticsearch doc id. Does not support composite primary keys. 
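Since the destination-elasticsearch authenticationMethod is a oneOf across no auth, API key/secret, and username/password, a small sketch can make the three shapes concrete. Only the key names (endpoint, upsert, apiKeyId, apiKeySecret, username, password) come from the properties above; the method labels and all values are assumptions.

```
# Hypothetical destination-elasticsearch sketches for the three authentication variants.
# The "method" labels and all concrete values are illustrative assumptions.
es_endpoint = "https://es.example.com:9200"    # full URL of the Elasticsearch server

es_no_auth = {
    "endpoint": es_endpoint,
    "upsert": True,                            # use the primary key as the document id when available
    "authenticationMethod": {"method": "none"},
}

es_api_key_auth = {
    "endpoint": es_endpoint,
    "authenticationMethod": {
        "method": "secret",                    # assumed label for the API key/secret variant
        "apiKeyId": "my-key-id",
        "apiKeySecret": "my-key-secret",
    },
}

es_basic_auth = {
    "endpoint": es_endpoint,
    "authenticationMethod": {
        "method": "basic",                     # assumed label for username/password
        "username": "elastic",
        "password": "changeme",
    },
}
```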
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
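+# Editorial note (assumption about the generator, not part of the upstream connector specs): the keys in this
+# bundle appear to follow the pattern datasources.section.<connector-id>.<field path>.title|description, where
+# the field path mirrors the connector's configuration schema: nested objects contribute a "properties" segment
+# and schema alternatives contribute "oneOf.<n>". For example, datasources.section.destination-mongodb.instance_type.oneOf.2.title
+# below is the title of the third "oneOf" option of the instance_type field for the destination-mongodb connector.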
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by commas. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer. +datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages. +datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions. +datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name which can be accessed with. +datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar. +datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms). +datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces. +datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters. +datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk. +datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-rabbitmq.exchange.description=The exchange name. +datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name. +datasources.section.destination-rabbitmq.password.description=The password to connect. +datasources.section.destination-rabbitmq.port.description=The RabbitMQ port. +datasources.section.destination-rabbitmq.routing_key.description=The routing key. +datasources.section.destination-rabbitmq.ssl.description=SSL enabled. +datasources.section.destination-rabbitmq.username.description=The username to connect. +datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name. +datasources.section.destination-redis.cache_type.title=Cache type +datasources.section.destination-redis.host.title=Host +datasources.section.destination-redis.password.title=Password +datasources.section.destination-redis.port.title=Port +datasources.section.destination-redis.username.title=Username +datasources.section.destination-redis.cache_type.description=Redis cache type to store data in. +datasources.section.destination-redis.host.description=Redis host to connect to. +datasources.section.destination-redis.password.description=Password associated with Redis. +datasources.section.destination-redis.port.description=Port of Redis. 
+datasources.section.destination-redis.username.description=Username associated with Redis. +datasources.section.destination-redshift.database.title=Database +datasources.section.destination-redshift.host.title=Host +datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-redshift.password.title=Password +datasources.section.destination-redshift.port.title=Port +datasources.section.destination-redshift.schema.title=Default Schema +datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key +datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging +datasources.section.destination-redshift.uploading_method.title=Uploading Method +datasources.section.destination-redshift.username.title=Username +datasources.section.destination-redshift.database.description=Name of the database. +datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com) +datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-redshift.password.description=Password associated with the username. +datasources.section.destination-redshift.port.description=Port of the database. +datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-redshift.uploading_method.description=The method how the data will be uploaded to the database. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key. 
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-rockset.api_key.title=Api Key +datasources.section.destination-rockset.api_server.title=Api Server +datasources.section.destination-rockset.workspace.title=Workspace +datasources.section.destination-rockset.api_key.description=Rockset api key +datasources.section.destination-rockset.api_server.description=Rockset api URL +datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created + written to. 
+datasources.section.destination-s3.access_key_id.title=S3 Key ID * +datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec * +datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage 
+datasources.section.destination-s3.format.title=Output Format * +datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional) +datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional) +datasources.section.destination-s3.secret_access_key.title=S3 Access Key * +datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here. +datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-s3.format.description=Format of the data output. See here for more details +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. 
This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here. +datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here +datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes. +datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here +datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here +datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here +datasources.section.destination-scylla.address.title=Address +datasources.section.destination-scylla.keyspace.title=Keyspace +datasources.section.destination-scylla.password.title=Password +datasources.section.destination-scylla.port.title=Port +datasources.section.destination-scylla.replication.title=Replication factor +datasources.section.destination-scylla.username.title=Username +datasources.section.destination-scylla.address.description=Address to connect to. +datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in. +datasources.section.destination-scylla.password.description=Password associated with Scylla. +datasources.section.destination-scylla.port.description=Port of Scylla. +datasources.section.destination-scylla.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-scylla.username.description=Username to use to access Scylla. +datasources.section.destination-sftp-json.destination_path.title=Destination path +datasources.section.destination-sftp-json.host.title=Host +datasources.section.destination-sftp-json.password.title=Password +datasources.section.destination-sftp-json.port.title=Port +datasources.section.destination-sftp-json.username.title=User +datasources.section.destination-sftp-json.destination_path.description=Path to the directory where json files will be written. +datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server. +datasources.section.destination-sftp-json.password.description=Password associated with the username. +datasources.section.destination-sftp-json.port.description=Port of the SFTP server. +datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server. 
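+# Minimal consumption sketch (editorial; written as Scala inside comments so the .properties content stays valid).
+# Assumption: this bundle is packaged on the classpath; the resource path used below is hypothetical.
+#   val props = new java.util.Properties()
+#   props.load(getClass.getResourceAsStream("/airbyte/datasources.properties"))  // assumed resource path
+#   props.getProperty("datasources.section.destination-snowflake.database.title") // "Database", per the entry below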
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional) +datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password +datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password +datasources.section.destination-snowflake.credentials.title=Authorization Method +datasources.section.destination-snowflake.database.title=Database +datasources.section.destination-snowflake.host.title=Host +datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging +datasources.section.destination-snowflake.loading_method.title=Data Staging Method +datasources.section.destination-snowflake.role.title=Role +datasources.section.destination-snowflake.schema.title=Default Schema +datasources.section.destination-snowflake.username.title=Username +datasources.section.destination-snowflake.warehouse.title=Warehouse +datasources.section.destination-snowflake.credentials.description= +datasources.section.destination-snowflake.credentials.description= +datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key. +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username. +datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into +datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format ...snowflakecomputing.com) +datasources.section.destination-snowflake.jdbc_url_params.description=Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &).
Example: key1=value1&key2=value2&key3=value3 +datasources.section.destination-snowflake.loading_method.description=Select a data staging method +datasources.section.destination-snowflake.loading_method.oneOf.0.description=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.description=Enter your AWS access key ID. Airbyte requires Read and Write permissions on your S3 bucket +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.description=Choose a data encryption method for the staging data +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.description=Toggle to delete staging files from the S3 bucket after a successful sync +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.description=Enter your S3 bucket name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.description=Enter the region where your S3 bucket resides +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.description=Enter your AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.3.description=Recommended for large production workloads for better speed and scalability. 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.description=Enter the Cloud Storage bucket name +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.description=Enter your Google Cloud service account key in the JSON format with read/write access to your Cloud Storage staging bucket +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.description=Enter the Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.4.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.description=Enter your Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.description=Enter your Azure Blob Storage container name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.description=Enter the Azure Blob Storage endpoint domain name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.description=Enter the Shared access signature (SAS) token to grant Snowflake limited access to objects in your Azure Blob Storage account +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.role.description=Enter the role that you want to use to access Snowflake +datasources.section.destination-snowflake.schema.description=Enter the name of the default schema +datasources.section.destination-snowflake.username.description=Enter the name of the user you want to use to access the database +datasources.section.destination-snowflake.warehouse.description=Enter the name of the warehouse that you want to sync data into +datasources.section.destination-sqlite.destination_path.description=Path to the sqlite.db file. The file will be placed inside that local mount. For more information check out our docs +datasources.section.destination-tidb.database.title=Database +datasources.section.destination-tidb.host.title=Host +datasources.section.destination-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-tidb.password.title=Password +datasources.section.destination-tidb.port.title=Port +datasources.section.destination-tidb.ssl.title=SSL Connection +datasources.section.destination-tidb.username.title=User +datasources.section.destination-tidb.database.description=Name of the database. +datasources.section.destination-tidb.host.description=Hostname of the database. +datasources.section.destination-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-tidb.password.description=Password associated with the username. +datasources.section.destination-tidb.port.description=Port of the database. 
+datasources.section.destination-tidb.ssl.description=Encrypt data using SSL. +datasources.section.destination-tidb.username.description=Username to use to access the database. +datasources.section.source-airtable.api_key.title=API Key +datasources.section.source-airtable.base_id.title=Base ID +datasources.section.source-airtable.tables.title=Tables +datasources.section.source-airtable.api_key.description=The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key. +datasources.section.source-airtable.base_id.description=The Base ID to integrate the data from. You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs. +datasources.section.source-airtable.tables.description=The list of Tables to integrate. +datasources.section.source-amazon-ads.auth_type.title=Auth Type +datasources.section.source-amazon-ads.client_id.title=Client ID +datasources.section.source-amazon-ads.client_secret.title=Client Secret +datasources.section.source-amazon-ads.profiles.title=Profile IDs (Optional) +datasources.section.source-amazon-ads.refresh_token.title=Refresh Token +datasources.section.source-amazon-ads.region.title=Region * +datasources.section.source-amazon-ads.report_generation_max_retries.title=Report Generation Maximum Retries * +datasources.section.source-amazon-ads.report_wait_timeout.title=Report Wait Timeout * +datasources.section.source-amazon-ads.start_date.title=Start Date (Optional) +datasources.section.source-amazon-ads.client_id.description=The client ID of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.client_secret.description=The client secret of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.profiles.description=Profile IDs you want to fetch data for. See docs for more details. +datasources.section.source-amazon-ads.refresh_token.description=Amazon Ads refresh token. See the docs for more information on how to obtain this token. +datasources.section.source-amazon-ads.region.description=Region to pull data from (EU/NA/FE). See docs for more details. +datasources.section.source-amazon-ads.report_generation_max_retries.description=Maximum retries Airbyte will attempt for fetching report data. Default is 5. +datasources.section.source-amazon-ads.report_wait_timeout.description=Timeout duration in minutes for Reports. Default is 30 minutes. +datasources.section.source-amazon-ads.start_date.description=The Start date for collecting reports, should not be more than 60 days in the past. 
In YYYY-MM-DD format +datasources.section.source-amazon-seller-partner.app_id.title=App ID * +datasources.section.source-amazon-seller-partner.auth_type.title=Auth Type +datasources.section.source-amazon-seller-partner.aws_access_key.title=AWS Access Key +datasources.section.source-amazon-seller-partner.aws_environment.title=AWS Environment +datasources.section.source-amazon-seller-partner.aws_secret_key.title=AWS Secret Access Key +datasources.section.source-amazon-seller-partner.lwa_app_id.title=LWA Client ID +datasources.section.source-amazon-seller-partner.lwa_client_secret.title=LWA Client Secret +datasources.section.source-amazon-seller-partner.max_wait_seconds.title=Max wait time for reports (in seconds) +datasources.section.source-amazon-seller-partner.period_in_days.title=Period In Days +datasources.section.source-amazon-seller-partner.refresh_token.title=Refresh Token +datasources.section.source-amazon-seller-partner.region.title=AWS Region +datasources.section.source-amazon-seller-partner.replication_end_date.title=End Date +datasources.section.source-amazon-seller-partner.replication_start_date.title=Start Date +datasources.section.source-amazon-seller-partner.report_options.title=Report Options +datasources.section.source-amazon-seller-partner.role_arn.title=Role ARN +datasources.section.source-amazon-seller-partner.app_id.description=Your Amazon App ID +datasources.section.source-amazon-seller-partner.aws_access_key.description=Specifies the AWS access key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.aws_environment.description=An enumeration. +datasources.section.source-amazon-seller-partner.aws_secret_key.description=Specifies the AWS secret key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.lwa_app_id.description=Your Login with Amazon Client ID. +datasources.section.source-amazon-seller-partner.lwa_client_secret.description=Your Login with Amazon Client Secret. +datasources.section.source-amazon-seller-partner.max_wait_seconds.description=Sometimes reports can take up to 30 minutes to generate. This will set the limit for how long to wait for a successful report. +datasources.section.source-amazon-seller-partner.period_in_days.description=Will be used for stream slicing for initial full_refresh sync when no updated state is present for reports that support sliced incremental sync. +datasources.section.source-amazon-seller-partner.refresh_token.description=The Refresh Token obtained via OAuth flow authorization. +datasources.section.source-amazon-seller-partner.region.description=An enumeration. +datasources.section.source-amazon-seller-partner.replication_end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. +datasources.section.source-amazon-seller-partner.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-amazon-seller-partner.report_options.description=Additional information passed to reports. This varies by report type. Must be a valid JSON string. +datasources.section.source-amazon-seller-partner.role_arn.description=Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations requested using this profile. (Needs permission to 'Assume Role' STS).
+datasources.section.source-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.source-amazon-sqs.attributes_to_return.title=Message Attributes To Return +datasources.section.source-amazon-sqs.delete_messages.title=Delete Messages After Read +datasources.section.source-amazon-sqs.max_batch_size.title=Max Batch Size +datasources.section.source-amazon-sqs.max_wait_time.title=Max Wait Time +datasources.section.source-amazon-sqs.queue_url.title=Queue URL +datasources.section.source-amazon-sqs.region.title=AWS Region +datasources.section.source-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.source-amazon-sqs.visibility_timeout.title=Message Visibility Timeout +datasources.section.source-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.attributes_to_return.description=Comma-separated list of Message Attribute names to return +datasources.section.source-amazon-sqs.delete_messages.description=If Enabled, messages will be deleted from the SQS Queue after being read. If Disabled, messages are left in the queue and can be read more than once. WARNING: Enabling this option can result in data loss in cases of failure; use with caution and see the documentation for more detail. +datasources.section.source-amazon-sqs.max_batch_size.description=Max amount of messages to get in one batch (10 max) +datasources.section.source-amazon-sqs.max_wait_time.description=Max amount of time in seconds to wait for messages in a single poll (20 max) +datasources.section.source-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.source-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.source-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.visibility_timeout.description=Modify the Visibility Timeout of the individual message from the Queue's default (seconds). +datasources.section.source-amplitude.api_key.title=API Key +datasources.section.source-amplitude.secret_key.title=Secret Key +datasources.section.source-amplitude.start_date.title=Replication Start Date +datasources.section.source-amplitude.api_key.description=Amplitude API Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.secret_key.description=Amplitude Secret Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-apify-dataset.clean.title=Clean +datasources.section.source-apify-dataset.datasetId.title=Dataset ID +datasources.section.source-apify-dataset.clean.description=If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false. +datasources.section.source-apify-dataset.datasetId.description=ID of the dataset you would like to load to Airbyte. +datasources.section.source-appsflyer.api_token.description=Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard. +datasources.section.source-appsflyer.app_id.description=App identifier as found in AppsFlyer.
+datasources.section.source-appsflyer.start_date.description=The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days. +datasources.section.source-appsflyer.timezone.description=Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console. +datasources.section.source-appstore-singer.issuer_id.title=Issuer ID +datasources.section.source-appstore-singer.key_id.title=Key ID +datasources.section.source-appstore-singer.private_key.title=Private Key +datasources.section.source-appstore-singer.start_date.title=Start Date +datasources.section.source-appstore-singer.vendor.title=Vendor ID +datasources.section.source-appstore-singer.issuer_id.description=Appstore Issuer ID. See the docs for more information on how to obtain this ID. +datasources.section.source-appstore-singer.key_id.description=Appstore Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.private_key.description=Appstore Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-appstore-singer.vendor.description=Appstore Vendor ID. See the docs for more information on how to obtain this ID. +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.title=Personal Access Token +datasources.section.source-asana.credentials.oneOf.0.title=Authenticate with Personal Access Token +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title= +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title= +datasources.section.source-asana.credentials.oneOf.1.title=Authenticate via Asana (OAuth) +datasources.section.source-asana.credentials.title=Authentication mechanism +datasources.section.source-asana.credentials.description=Choose how to authenticate to Asana +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.description=PAT Credentials +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.description=Asana Personal Access Token (generate yours here).
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.description=OAuth Credentials +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description= +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description= +datasources.section.source-aws-cloudtrail.aws_key_id.title=Key ID +datasources.section.source-aws-cloudtrail.aws_region_name.title=Region Name +datasources.section.source-aws-cloudtrail.aws_secret_key.title=Secret Key +datasources.section.source-aws-cloudtrail.start_date.title=Start Date +datasources.section.source-aws-cloudtrail.aws_key_id.description=AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.aws_region_name.description=The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name. +datasources.section.source-aws-cloudtrail.aws_secret_key.description=AWS CloudTrail Secret Key. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.start_date.description=The date from which you would like to replicate data. Data in AWS CloudTrail is available for the last 90 days only. Format: YYYY-MM-DD. +datasources.section.source-azure-table.storage_access_key.title=Access Key +datasources.section.source-azure-table.storage_account_name.title=Account Name +datasources.section.source-azure-table.storage_endpoint_suffix.title=Endpoint Suffix +datasources.section.source-azure-table.storage_access_key.description=Azure Table Storage Access Key. See the docs for more information on how to obtain this key. +datasources.section.source-azure-table.storage_account_name.description=The name of your storage account. +datasources.section.source-azure-table.storage_endpoint_suffix.description=Azure Table Storage service account URL suffix. See the docs for more information on how to obtain the endpoint suffix. +datasources.section.source-bamboo-hr.api_key.description=API key of BambooHR. +datasources.section.source-bamboo-hr.custom_reports_fields.description=Comma-separated list of fields to include in custom reports. +datasources.section.source-bamboo-hr.custom_reports_include_default_fields.description=If true, the custom reports endpoint will include the default fields defined here: https://documentation.bamboohr.com/docs/list-of-field-names. +datasources.section.source-bamboo-hr.subdomain.description=Subdomain of BambooHR. +datasources.section.source-bigcommerce.access_token.title=Access Token +datasources.section.source-bigcommerce.start_date.title=Start Date +datasources.section.source-bigcommerce.store_hash.title=Store Hash +datasources.section.source-bigcommerce.access_token.description=Access Token for making authenticated requests. +datasources.section.source-bigcommerce.start_date.description=The date from which you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-bigcommerce.store_hash.description=The hash code of the store. For https://api.bigcommerce.com/stores/HASH_CODE/v3/, the store's hash code is 'HASH_CODE'.
+datasources.section.source-bigquery.credentials_json.title=Credentials JSON +datasources.section.source-bigquery.dataset_id.title=Default Dataset ID +datasources.section.source-bigquery.project_id.title=Project ID +datasources.section.source-bigquery.credentials_json.description=The contents of your Service Account Key JSON file. See the docs for more information on how to obtain this key. +datasources.section.source-bigquery.dataset_id.description=The dataset ID to search for tables and views. If you are only loading data from one dataset, setting this option could result in much faster schema discovery. +datasources.section.source-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. +datasources.section.source-bing-ads.client_id.title=Client ID +datasources.section.source-bing-ads.client_secret.title=Client Secret +datasources.section.source-bing-ads.developer_token.title=Developer Token +datasources.section.source-bing-ads.refresh_token.title=Refresh Token +datasources.section.source-bing-ads.reports_start_date.title=Reports replication start date +datasources.section.source-bing-ads.tenant_id.title=Tenant ID +datasources.section.source-bing-ads.client_id.description=The Client ID of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.client_secret.description=The Client Secret of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.developer_token.description=Developer token associated with user. See more info in the docs. +datasources.section.source-bing-ads.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-bing-ads.reports_start_date.description=The start date from which to begin replicating report data. Any data generated before this date will not be replicated in reports. This is a UTC date in YYYY-MM-DD format. +datasources.section.source-bing-ads.tenant_id.description=The Tenant ID of your Microsoft Advertising developer application. Set this to "common" unless you know you need a different value. +datasources.section.source-braintree.environment.title=Environment +datasources.section.source-braintree.merchant_id.title=Merchant ID +datasources.section.source-braintree.private_key.title=Private Key +datasources.section.source-braintree.public_key.title=Public Key +datasources.section.source-braintree.start_date.title=Start Date +datasources.section.source-braintree.environment.description=Environment specifies where the data will come from. +datasources.section.source-braintree.merchant_id.description=The unique identifier for your entire gateway account. See the docs for more information on how to obtain this ID. +datasources.section.source-braintree.private_key.description=Braintree Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.public_key.description=Braintree Public Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-cart.access_token.title=Access Token +datasources.section.source-cart.start_date.title=Start Date +datasources.section.source-cart.store_name.title=Store Name +datasources.section.source-cart.access_token.description=Access Token for making authenticated requests. 
+datasources.section.source-cart.start_date.description=The date from which you'd like to replicate the data +datasources.section.source-cart.store_name.description=The name of Cart.com Online Store. All API URLs start with https://[mystorename.com]/api/v1/, where [mystorename.com] is the domain name of your store. +datasources.section.source-chargebee.product_catalog.title=Product Catalog +datasources.section.source-chargebee.site.title=Site +datasources.section.source-chargebee.site_api_key.title=API Key +datasources.section.source-chargebee.start_date.title=Start Date +datasources.section.source-chargebee.product_catalog.description=Product Catalog version of your Chargebee site. Instructions on how to find your version can be found here under the `API Version` section. +datasources.section.source-chargebee.site.description=The site prefix for your Chargebee instance. +datasources.section.source-chargebee.site_api_key.description=Chargebee API Key. See the docs for more information on how to obtain this key. +datasources.section.source-chargebee.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-chargify.api_key.description=Chargify API Key. +datasources.section.source-chargify.domain.description=Chargify domain. Normally this domain follows the format companyname.chargify.com. +datasources.section.source-chartmogul.api_key.description=Chartmogul API key +datasources.section.source-chartmogul.interval.description=Some APIs such as Metrics require intervals to cluster data. +datasources.section.source-chartmogul.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. When feasible, any data before this date will not be replicated. +datasources.section.source-clickhouse.database.title=Database +datasources.section.source-clickhouse.host.title=Host +datasources.section.source-clickhouse.password.title=Password +datasources.section.source-clickhouse.port.title=Port +datasources.section.source-clickhouse.ssl.title=SSL Connection +datasources.section.source-clickhouse.username.title=Username +datasources.section.source-clickhouse.database.description=The name of the database. +datasources.section.source-clickhouse.host.description=The host endpoint of the Clickhouse cluster. +datasources.section.source-clickhouse.password.description=The password associated with this username. +datasources.section.source-clickhouse.port.description=The port of the database. +datasources.section.source-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.source-clickhouse.username.description=The username which is used to access the database. +datasources.section.source-close-com.api_key.description=Close.com API key (usually starts with 'api_'; find yours here). +datasources.section.source-close-com.start_date.description=The start date to sync data. Leave blank for full sync. Format: YYYY-MM-DD. +datasources.section.source-cockroachdb.database.title=DB Name +datasources.section.source-cockroachdb.host.title=Host +datasources.section.source-cockroachdb.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-cockroachdb.password.title=Password +datasources.section.source-cockroachdb.port.title=Port +datasources.section.source-cockroachdb.ssl.title=Connect using SSL +datasources.section.source-cockroachdb.username.title=User +datasources.section.source-cockroachdb.database.description=Name of the database.
+datasources.section.source-cockroachdb.host.description=Hostname of the database. +datasources.section.source-cockroachdb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-cockroachdb.password.description=Password associated with the username. +datasources.section.source-cockroachdb.port.description=Port of the database. +datasources.section.source-cockroachdb.ssl.description=Encrypt client/server communications for increased security. +datasources.section.source-cockroachdb.username.description=Username to use to access the database. +datasources.section.source-commercetools.client_id.description=ID of the API Client. +datasources.section.source-commercetools.client_secret.description=The password or secret of the API Client. +datasources.section.source-commercetools.host.description=The cloud provider where your shop is hosted. See: https://docs.commercetools.com/api/authorization +datasources.section.source-commercetools.project_key.description=The project key. +datasources.section.source-commercetools.region.description=The region of the platform. +datasources.section.source-commercetools.start_date.description=The date from which you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-confluence.api_token.description=Please follow the Atlassian documentation for generating an API token: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/ +datasources.section.source-confluence.domain_name.description=Your Confluence domain name +datasources.section.source-confluence.email.description=Your Confluence login email +datasources.section.source-db2.encryption.oneOf.0.title=Unencrypted +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.title=Key Store Password. This field is optional. If you do not fill in this field, the password will be randomly generated. +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.title=SSL PEM file +datasources.section.source-db2.encryption.oneOf.1.title=TLS Encrypted (verify certificate) +datasources.section.source-db2.encryption.title=Encryption +datasources.section.source-db2.jdbc_url_params.title=JDBC URL Params +datasources.section.source-db2.db.description=Name of the database. +datasources.section.source-db2.encryption.description=Encryption method to use when communicating with the database +datasources.section.source-db2.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-db2.encryption.oneOf.1.description=Verify and use the cert provided by the server. +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.description=Key Store Password +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.source-db2.host.description=Host of the Db2 database. +datasources.section.source-db2.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-db2.password.description=Password associated with the username.
+datasources.section.source-db2.port.description=Port of the database. +datasources.section.source-db2.username.description=Username to use to access the database. +datasources.section.source-delighted.api_key.title=Delighted API Key +datasources.section.source-delighted.since.title=Since +datasources.section.source-delighted.api_key.description=A Delighted API key. +datasources.section.source-delighted.since.description=The date from which you'd like to replicate the data +datasources.section.source-dixa.api_token.description=Dixa API token +datasources.section.source-dixa.batch_size.description=Number of days to batch into one request. Max 31. +datasources.section.source-dixa.start_date.description=The connector pulls records updated from this date onwards. +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-drift.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.1.title=Access Token +datasources.section.source-drift.credentials.title=Authorization Method +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.description=The Client ID of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.description=Drift Access Token. See the docs for more information on how to generate this key. 
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.source-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.source-elasticsearch.endpoint.title=Server Endpoint +datasources.section.source-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to be used when accessing an enterprise Elasticsearch instance. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.source-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.source-facebook-marketing.access_token.title=Access Token +datasources.section.source-facebook-marketing.account_id.title=Account ID +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.title=ValidActionBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.title=Action Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.title=ValidBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.title=Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.title=End Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.title=ValidEnums +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.title=Fields +datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.title=Custom Insights Lookback Window +datasources.section.source-facebook-marketing.custom_insights.items.properties.name.title=Name +datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.title=Start Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.title=Time Increment
+datasources.section.source-facebook-marketing.custom_insights.items.title=InsightConfig +datasources.section.source-facebook-marketing.custom_insights.title=Custom Insights +datasources.section.source-facebook-marketing.end_date.title=End Date +datasources.section.source-facebook-marketing.fetch_thumbnail_images.title=Fetch Thumbnail Images +datasources.section.source-facebook-marketing.include_deleted.title=Include Deleted +datasources.section.source-facebook-marketing.insights_lookback_window.title=Insights Lookback Window +datasources.section.source-facebook-marketing.max_batch_size.title=Maximum size of Batched Requests +datasources.section.source-facebook-marketing.page_size.title=Page Size of Requests +datasources.section.source-facebook-marketing.start_date.title=Start Date +datasources.section.source-facebook-marketing.access_token.description=The value of the access token generated. See the docs for more information +datasources.section.source-facebook-marketing.account_id.description=The Facebook Ad account ID to use when pulling data from the Facebook Marketing API. +datasources.section.source-facebook-marketing.custom_insights.description=A list which contains insights entries; each entry must have a name and can contain fields, breakdowns or action_breakdowns. +datasources.section.source-facebook-marketing.custom_insights.items.description=Config for custom insights +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.description=A list of chosen action_breakdowns for action_breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.description=A list of chosen breakdowns for breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.description=The date until which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. All data generated between the start date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.description=A list of chosen fields for fields parameter +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.custom_insights.items.properties.name.description=The name value of insight +datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.description=The date from which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.description=Time window in days by which to aggregate statistics. The sync will be chunked into N day intervals, where N is the number of days you specified. For example, if you set this value to 7, then all statistics will be reported as 7-day aggregates by starting from the start_date.
If the start and end dates are October 1st and October 30th, then the connector will output 5 records: 01 - 06, 07 - 13, 14 - 20, 21 - 27, and 28 - 30 (3 days only). +datasources.section.source-facebook-marketing.end_date.description=The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.fetch_thumbnail_images.description=In each Ad Creative, fetch the thumbnail_url and store the result in thumbnail_data_url +datasources.section.source-facebook-marketing.include_deleted.description=Include data from deleted Campaigns, Ads, and AdSets +datasources.section.source-facebook-marketing.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.max_batch_size.description=Maximum batch size used when sending batch requests to Facebook API. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.page_size.description=Page size used when sending requests to Facebook API to specify number of records per page when response has pagination. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.start_date.description=The date from which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-facebook-pages.access_token.title=Page Access Token +datasources.section.source-facebook-pages.page_id.title=Page ID +datasources.section.source-facebook-pages.access_token.description=Facebook Page Access Token +datasources.section.source-facebook-pages.page_id.description=Page ID +datasources.section.source-faker.count.title=Count +datasources.section.source-faker.records_per_slice.title=Records Per Stream Slice +datasources.section.source-faker.records_per_sync.title=Records Per Sync +datasources.section.source-faker.seed.title=Seed +datasources.section.source-faker.count.description=How many users should be generated in total. This setting does not apply to the purchases or products stream. +datasources.section.source-faker.records_per_slice.description=How many fake records will be in each page (stream slice), before a state message is emitted? +datasources.section.source-faker.records_per_sync.description=How many fake records will be returned for each sync, for each stream? By default, it will take 2 syncs to create the requested 1000 records. 
+datasources.section.source-faker.seed.description=Manually control the faker random seed to return the same values on subsequent runs (leave -1 for random) +datasources.section.source-file.dataset_name.title=Dataset Name +datasources.section.source-file.format.title=File Format +datasources.section.source-file.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.4.properties.user.title=User +datasources.section.source-file.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.5.properties.user.title=User +datasources.section.source-file.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.6.properties.user.title=User +datasources.section.source-file.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file.provider.oneOf.7.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.7.title=Local Filesystem (limited) +datasources.section.source-file.provider.title=Storage Provider +datasources.section.source-file.reader_options.title=Reader Options +datasources.section.source-file.url.title=URL +datasources.section.source-file.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details. 
+datasources.section.source-file.provider.oneOf.4.properties.host.description= +datasources.section.source-file.provider.oneOf.4.properties.host.description= +datasources.section.source-file.provider.oneOf.4.properties.password.description= +datasources.section.source-file.provider.oneOf.4.properties.password.description= +datasources.section.source-file.provider.oneOf.4.properties.port.description= +datasources.section.source-file.provider.oneOf.4.properties.port.description= +datasources.section.source-file.provider.oneOf.4.properties.user.description= +datasources.section.source-file.provider.oneOf.4.properties.user.description= +datasources.section.source-file.provider.oneOf.5.properties.host.description= +datasources.section.source-file.provider.oneOf.5.properties.host.description= +datasources.section.source-file.provider.oneOf.5.properties.password.description= +datasources.section.source-file.provider.oneOf.5.properties.password.description= +datasources.section.source-file.provider.oneOf.5.properties.port.description= +datasources.section.source-file.provider.oneOf.5.properties.port.description= +datasources.section.source-file.provider.oneOf.5.properties.user.description= +datasources.section.source-file.provider.oneOf.5.properties.user.description= +datasources.section.source-file.provider.oneOf.6.properties.host.description= +datasources.section.source-file.provider.oneOf.6.properties.host.description= +datasources.section.source-file.provider.oneOf.6.properties.password.description= +datasources.section.source-file.provider.oneOf.6.properties.password.description= +datasources.section.source-file.provider.oneOf.6.properties.port.description= +datasources.section.source-file.provider.oneOf.6.properties.port.description= +datasources.section.source-file.provider.oneOf.6.properties.user.description= +datasources.section.source-file.provider.oneOf.6.properties.user.description= +datasources.section.source-file.provider.oneOf.7.properties.storage.description=WARNING: Note that the local storage URL available for reading must start with the local mount "/local/" at the moment until we implement more advanced docker mounting options. +datasources.section.source-file.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior. +datasources.section.source-file.url.description=The URL path to access the file which should be replicated. 
+datasources.section.source-file-secure.dataset_name.title=Dataset Name +datasources.section.source-file-secure.format.title=File Format +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file-secure.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file-secure.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file-secure.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file-secure.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file-secure.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file-secure.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.5.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file-secure.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.6.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file-secure.provider.title=Storage Provider +datasources.section.source-file-secure.reader_options.title=Reader Options +datasources.section.source-file-secure.url.title=URL +datasources.section.source-file-secure.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file-secure.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file-secure.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details. 
+datasources.section.source-file-secure.provider.oneOf.4.properties.host.description=
+datasources.section.source-file-secure.provider.oneOf.4.properties.password.description=
+datasources.section.source-file-secure.provider.oneOf.4.properties.port.description=
+datasources.section.source-file-secure.provider.oneOf.4.properties.user.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.host.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.password.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.port.description=
+datasources.section.source-file-secure.provider.oneOf.5.properties.user.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.host.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.password.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.port.description=
+datasources.section.source-file-secure.provider.oneOf.6.properties.user.description=
+datasources.section.source-file-secure.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior.
+datasources.section.source-file-secure.url.description=The URL path to access the file which should be replicated.
+datasources.section.source-firebolt.account.title=Account
+datasources.section.source-firebolt.database.title=Database
+datasources.section.source-firebolt.engine.title=Engine
+datasources.section.source-firebolt.host.title=Host
+datasources.section.source-firebolt.password.title=Password
+datasources.section.source-firebolt.username.title=Username
+datasources.section.source-firebolt.account.description=Firebolt account to login.
+datasources.section.source-firebolt.database.description=The database to connect to.
+datasources.section.source-firebolt.engine.description=Engine name or url to connect to.
+datasources.section.source-firebolt.host.description=The host name of your Firebolt database.
+datasources.section.source-firebolt.password.description=Firebolt password.
+datasources.section.source-firebolt.username.description=Firebolt email address you use to login.
+datasources.section.source-flexport.api_key.title=API Key
+datasources.section.source-flexport.start_date.title=Start Date
+datasources.section.source-freshcaller.api_key.title=API Key
+datasources.section.source-freshcaller.domain.title=Domain for Freshcaller account
+datasources.section.source-freshcaller.requests_per_minute.title=Requests per minute
+datasources.section.source-freshcaller.start_date.title=Start Date
+datasources.section.source-freshcaller.sync_lag_minutes.title=Lag in minutes for each sync
+datasources.section.source-freshcaller.api_key.description=Freshcaller API Key. See the docs for more information on how to obtain this key.
+datasources.section.source-freshcaller.domain.description=Used to construct Base URL for the Freshcaller APIs
+datasources.section.source-freshcaller.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account.
+datasources.section.source-freshcaller.start_date.description=UTC date and time. Any data created after this date will be replicated.
+datasources.section.source-freshcaller.sync_lag_minutes.description=Lag in minutes for each sync, i.e., at time T, data for the time range [prev_sync_time, T-30] will be fetched
+datasources.section.source-freshdesk.api_key.title=API Key
+datasources.section.source-freshdesk.domain.title=Domain
+datasources.section.source-freshdesk.requests_per_minute.title=Requests per minute
+datasources.section.source-freshdesk.start_date.title=Start Date
+datasources.section.source-freshdesk.api_key.description=Freshdesk API Key. See the docs for more information on how to obtain this key.
+datasources.section.source-freshdesk.domain.description=Freshdesk domain
+datasources.section.source-freshdesk.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account.
+datasources.section.source-freshdesk.start_date.description=UTC date and time. Any data created after this date will be replicated. If this parameter is not set, all data will be replicated.
+datasources.section.source-freshsales.api_key.title=API Key
+datasources.section.source-freshsales.domain_name.title=Domain Name
+datasources.section.source-freshsales.api_key.description=Freshsales API Key. See here. The key is case sensitive.
+datasources.section.source-freshsales.domain_name.description=The Name of your Freshsales domain
+datasources.section.source-freshservice.api_key.title=API Key
+datasources.section.source-freshservice.domain_name.title=Domain Name
+datasources.section.source-freshservice.start_date.title=Start Date
+datasources.section.source-freshservice.api_key.description=Freshservice API Key. See here. The key is case sensitive.
+datasources.section.source-freshservice.domain_name.description=The name of your Freshservice domain
+datasources.section.source-freshservice.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-github.branch.title=Branch (Optional) +datasources.section.source-github.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-github.credentials.oneOf.0.title=OAuth +datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.title=Personal Access Tokens +datasources.section.source-github.credentials.oneOf.1.title=Personal Access Token +datasources.section.source-github.credentials.title=Authentication * +datasources.section.source-github.page_size_for_large_streams.title=Page size for large streams (Optional) +datasources.section.source-github.repository.title=GitHub Repositories +datasources.section.source-github.start_date.title=Start date +datasources.section.source-github.branch.description=Space-delimited list of GitHub repository branches to pull commits for, e.g. `airbytehq/airbyte/master`. If no branches are specified for a repository, the default branch will be pulled. +datasources.section.source-github.credentials.description=Choose how to authenticate to GitHub +datasources.section.source-github.credentials.oneOf.0.properties.access_token.description=OAuth access token +datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.description=Log into GitHub and then generate a personal access token. To load balance your API quota consumption across multiple API tokens, input multiple tokens separated with "," +datasources.section.source-github.page_size_for_large_streams.description=The Github connector contains several streams with a large amount of data. The page size of such streams depends on the size of your repository. We recommended that you specify values between 10 and 30. +datasources.section.source-github.repository.description=Space-delimited list of GitHub organizations/repositories, e.g. `airbytehq/airbyte` for single repository, `airbytehq/*` for get all repositories from organization and `airbytehq/airbyte airbytehq/another-repo` for multiple repositories. +datasources.section.source-github.start_date.description=The date from which you'd like to replicate data from GitHub in the format YYYY-MM-DDT00:00:00Z. For the streams which support this configuration, only data generated on or after the start date will be replicated. This field doesn't apply to all streams, see the docs for more info +datasources.section.source-gitlab.api_url.title=API URL +datasources.section.source-gitlab.groups.title=Groups +datasources.section.source-gitlab.private_token.title=Private Token +datasources.section.source-gitlab.projects.title=Projects +datasources.section.source-gitlab.start_date.title=Start Date +datasources.section.source-gitlab.api_url.description=Please enter your basic URL from GitLab instance. +datasources.section.source-gitlab.groups.description=Space-delimited list of groups. e.g. airbyte.io. +datasources.section.source-gitlab.private_token.description=Log into your GitLab account and then generate a personal Access Token. +datasources.section.source-gitlab.projects.description=Space-delimited list of projects. e.g. airbyte.io/documentation meltano/tap-gitlab. +datasources.section.source-gitlab.start_date.description=The date from which you'd like to replicate data for GitLab API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. 
+datasources.section.source-google-ads.conversion_window_days.title=Conversion Window (Optional) +datasources.section.source-google-ads.credentials.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-ads.credentials.properties.client_id.title=Client ID +datasources.section.source-google-ads.credentials.properties.client_secret.title=Client Secret +datasources.section.source-google-ads.credentials.properties.developer_token.title=Developer Token +datasources.section.source-google-ads.credentials.properties.refresh_token.title=Refresh Token +datasources.section.source-google-ads.credentials.title=Google Credentials +datasources.section.source-google-ads.custom_queries.items.properties.query.title=Custom Query +datasources.section.source-google-ads.custom_queries.items.properties.table_name.title=Destination Table Name +datasources.section.source-google-ads.custom_queries.title=Custom GAQL Queries (Optional) +datasources.section.source-google-ads.customer_id.title=Customer ID(s) +datasources.section.source-google-ads.end_date.title=End Date (Optional) +datasources.section.source-google-ads.login_customer_id.title=Login Customer ID for Managed Accounts (Optional) +datasources.section.source-google-ads.start_date.title=Start Date +datasources.section.source-google-ads.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. For more information, see Google's documentation. +datasources.section.source-google-ads.credentials.description= +datasources.section.source-google-ads.credentials.description= +datasources.section.source-google-ads.credentials.properties.access_token.description=Access Token for making authenticated requests. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.client_id.description=The Client ID of your Google Ads developer application. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.client_secret.description=The Client Secret of your Google Ads developer application. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.developer_token.description=Developer token granted by Google to use their APIs. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.refresh_token.description=The token for obtaining a new access token. More instruction on how to find this value in our docs +datasources.section.source-google-ads.custom_queries.description= +datasources.section.source-google-ads.custom_queries.description= +datasources.section.source-google-ads.custom_queries.items.properties.query.description=A custom defined GAQL query for building the report. Should not contain segments.date expression because it is used by incremental streams. See Google's query builder for more information. +datasources.section.source-google-ads.custom_queries.items.properties.table_name.description=The table name in your destination database for choosen query. +datasources.section.source-google-ads.customer_id.description=Comma separated list of (client) customer IDs. Each customer ID must be specified as a 10-digit number without dashes. More instruction on how to find this value in our docs. 
Metrics streams like AdGroupAdReport cannot be requested for a manager account. +datasources.section.source-google-ads.end_date.description=UTC date and time in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-google-ads.login_customer_id.description=If your access to the customer account is through a manager account, this field is required and must be set to the customer ID of the manager account (10-digit number without dashes). More information about this field you can see here +datasources.section.source-google-ads.start_date.description=UTC date and time in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.title=Authenticate via Google (Oauth) +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-analytics-data-api.credentials.title=Credentials +datasources.section.source-google-analytics-data-api.custom_reports.title=Custom Reports (Optional) +datasources.section.source-google-analytics-data-api.date_ranges_start_date.title=Date Range Start Date +datasources.section.source-google-analytics-data-api.property_id.title=Property ID +datasources.section.source-google-analytics-data-api.window_in_days.title=Data request time increment in days (Optional) +datasources.section.source-google-analytics-data-api.credentials.description=Credentials for the service +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization +datasources.section.source-google-analytics-data-api.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field. +datasources.section.source-google-analytics-data-api.date_ranges_start_date.description=The start date. One of the values Ndaysago, yesterday, today or in the format YYYY-MM-DD +datasources.section.source-google-analytics-data-api.property_id.description=A Google Analytics GA4 property identifier whose events are tracked. 
Specified in the URL path and not the body +datasources.section.source-google-analytics-data-api.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-analytics-v4.credentials.oneOf.0.title=Authenticate via Google (Oauth) +datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key +datasources.section.source-google-analytics-v4.credentials.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-analytics-v4.credentials.title=Credentials +datasources.section.source-google-analytics-v4.custom_reports.title=Custom Reports (Optional) +datasources.section.source-google-analytics-v4.start_date.title=Replication Start Date +datasources.section.source-google-analytics-v4.view_id.title=View ID +datasources.section.source-google-analytics-v4.window_in_days.title=Data request time increment in days (Optional) +datasources.section.source-google-analytics-v4.credentials.description=Credentials for the service +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization +datasources.section.source-google-analytics-v4.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field. +datasources.section.source-google-analytics-v4.start_date.description=The date in the format YYYY-MM-DD. Any data before this date will not be replicated. +datasources.section.source-google-analytics-v4.view_id.description=The ID for the Google Analytics View you want to fetch data from. This can be found from the Google Analytics Account Explorer. +datasources.section.source-google-analytics-v4.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. 
More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-google-directory.credentials.oneOf.0.title=Sign in via Google (OAuth)
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.title=Credentials JSON
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.source-google-directory.credentials.oneOf.1.properties.email.title=Email
+datasources.section.source-google-directory.credentials.oneOf.1.title=Service Account Key
+datasources.section.source-google-directory.credentials.title=Google Credentials
+datasources.section.source-google-directory.credentials.description=Google APIs use the OAuth 2.0 protocol for authentication and authorization. The Source supports Web server application and Service accounts scenarios.
+datasources.section.source-google-directory.credentials.oneOf.0.description=For this scenario, the user only needs to give permission to read Google Directory data.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.description=The Client ID of the developer application.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the developer application.
+datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.description=Authentication Scenario
+datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.description=The Token for obtaining a new access token.
+datasources.section.source-google-directory.credentials.oneOf.1.description=For this scenario, the user should obtain the service account's credentials from the Google API Console and provide a delegated email.
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key.
+datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.description=Authentication Scenario
+datasources.section.source-google-directory.credentials.oneOf.1.properties.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs.
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-search-console.authorization.oneOf.0.title=OAuth +datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.title=Admin Email +datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.title=Service Account JSON Key +datasources.section.source-google-search-console.authorization.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-search-console.authorization.title=Authentication Type +datasources.section.source-google-search-console.end_date.title=End Date +datasources.section.source-google-search-console.site_urls.title=Website URL Property +datasources.section.source-google-search-console.start_date.title=Start Date +datasources.section.source-google-search-console.authorization.description= +datasources.section.source-google-search-console.authorization.description= +datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.description=Access token for making authenticated requests. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.description=The client ID of your Google Search Console developer application. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.description=The client secret of your Google Search Console developer application. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.description=The email of the user which has permissions to access the Google Workspace Admin APIs. +datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.description=The JSON key of the service account to use for authorization. Read more here. +datasources.section.source-google-search-console.end_date.description=UTC date in the format 2017-01-25. Any data after this date will not be replicated. Must be greater or equal to the start date field. +datasources.section.source-google-search-console.site_urls.description=The URLs of the website property attached to your GSC account. Read more here. +datasources.section.source-google-search-console.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-google-workspace-admin-reports.credentials_json.title=Credentials JSON +datasources.section.source-google-workspace-admin-reports.email.title=Email +datasources.section.source-google-workspace-admin-reports.lookback.title=Lookback Window in Days +datasources.section.source-google-workspace-admin-reports.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key. 
+datasources.section.source-google-workspace-admin-reports.email.description=The email of the user, who has permissions to access the Google Workspace Admin APIs. +datasources.section.source-google-workspace-admin-reports.lookback.description=Sets the range of time shown in the report. The maximum value allowed by the Google API is 180 days. +datasources.section.source-greenhouse.api_key.title=API Key +datasources.section.source-greenhouse.api_key.description=Greenhouse API Key. See the docs for more information on how to generate this key. +datasources.section.source-harvest.account_id.title=Account ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-harvest.credentials.oneOf.0.title=Authenticate via Harvest (OAuth) +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.title=Personal Access Token +datasources.section.source-harvest.credentials.oneOf.1.title=Authenticate with Personal Access Token +datasources.section.source-harvest.credentials.title=Authentication mechanism +datasources.section.source-harvest.replication_start_date.title=Start Date +datasources.section.source-harvest.account_id.description=Harvest account ID. Required for all Harvest requests in pair with Personal Access Token +datasources.section.source-harvest.credentials.description=Choose how to authenticate to Harvest. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.description=The Client ID of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.description=Log into Harvest and then create new personal access token. +datasources.section.source-harvest.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-hellobaton.api_key.description=authentication key required to access the api endpoints +datasources.section.source-hellobaton.company.description=Company name that generates your base api url +datasources.section.source-hubplanner.api_key.description=Hubplanner API key. See https://github.com/hubplanner/API#authentication for more details. +datasources.section.source-instagram.access_token.title=Access Token +datasources.section.source-instagram.start_date.title=Start Date +datasources.section.source-instagram.access_token.description=The value of the access token generated. See the docs for more information +datasources.section.source-instagram.start_date.description=The date from which you'd like to replicate data for User Insights, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-intercom.access_token.title=Access token +datasources.section.source-intercom.start_date.title=Start date +datasources.section.source-intercom.access_token.description=Access token for making authenticated requests. See the Intercom docs for more information. 
+datasources.section.source-intercom.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-iterable.api_key.title=API Key +datasources.section.source-iterable.start_date.title=Start Date +datasources.section.source-iterable.api_key.description=Iterable API Key. See the docs for more information on how to obtain this key. +datasources.section.source-iterable.start_date.description=The date from which you'd like to replicate data for Iterable, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-jdbc.jdbc_url.title=JDBC URL +datasources.section.source-jdbc.password.title=Password +datasources.section.source-jdbc.username.title=Username +datasources.section.source-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.source-jdbc.password.description=The password associated with this username. +datasources.section.source-jdbc.username.description=The username which is used to access the database. +datasources.section.source-jira.additional_fields.title=Additional Fields +datasources.section.source-jira.api_token.title=API Token +datasources.section.source-jira.domain.title=Domain +datasources.section.source-jira.email.title=Email +datasources.section.source-jira.enable_experimental_streams.title=Enable Experimental Streams +datasources.section.source-jira.expand_issue_changelog.title=Expand Issue Changelog +datasources.section.source-jira.projects.title=Projects +datasources.section.source-jira.render_fields.title=Render Issue Fields +datasources.section.source-jira.start_date.title=Start Date +datasources.section.source-jira.additional_fields.description=List of additional fields to include in replicating issues. +datasources.section.source-jira.api_token.description=Jira API Token. See the docs for more information on how to generate this key. +datasources.section.source-jira.domain.description=The Domain for your Jira account, e.g. airbyteio.atlassian.net +datasources.section.source-jira.email.description=The user email for your Jira account. +datasources.section.source-jira.enable_experimental_streams.description=Allow the use of experimental streams which rely on undocumented Jira API endpoints. See https://docs.airbyte.io/integrations/sources/jira#experimental-tables for more info. +datasources.section.source-jira.expand_issue_changelog.description=Expand the changelog when replicating issues. +datasources.section.source-jira.projects.description=List of Jira project keys to replicate data for. +datasources.section.source-jira.render_fields.description=Render issue fields in HTML format in addition to Jira JSON-like format. +datasources.section.source-jira.start_date.description=The date from which you'd like to replicate data for Jira in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only in the following incremental streams: issues. 
+datasources.section.source-kafka.auto_commit_interval_ms.title=Auto Commit Interval, ms +datasources.section.source-kafka.auto_offset_reset.title=Auto Offset Reset +datasources.section.source-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.source-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.source-kafka.client_id.title=Client ID +datasources.section.source-kafka.enable_auto_commit.title=Enable Auto Commit +datasources.section.source-kafka.group_id.title=Group ID +datasources.section.source-kafka.max_poll_records.title=Max Poll Records +datasources.section.source-kafka.polling_time.title=Polling Time +datasources.section.source-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.source-kafka.protocol.title=Protocol +datasources.section.source-kafka.receive_buffer_bytes.title=Receive Buffer, bytes +datasources.section.source-kafka.repeated_calls.title=Repeated Calls +datasources.section.source-kafka.request_timeout_ms.title=Request Timeout, ms +datasources.section.source-kafka.retry_backoff_ms.title=Retry Backoff, ms +datasources.section.source-kafka.subscription.oneOf.0.properties.topic_partitions.title=List of topic:partition Pairs +datasources.section.source-kafka.subscription.oneOf.0.title=Manually assign a list of partitions +datasources.section.source-kafka.subscription.oneOf.1.properties.topic_pattern.title=Topic Pattern +datasources.section.source-kafka.subscription.oneOf.1.title=Subscribe to all topics matching specified pattern +datasources.section.source-kafka.subscription.title=Subscription Method +datasources.section.source-kafka.test_topic.title=Test Topic +datasources.section.source-kafka.auto_commit_interval_ms.description=The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true. +datasources.section.source-kafka.auto_offset_reset.description=What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer. +datasources.section.source-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
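+# Illustrative example only (hypothetical broker hostnames, not taken from any connector spec):
+# the bootstrap_servers value described above is a comma-separated host:port list, e.g.
+# bootstrap_servers=broker-1.internal.example:9092,broker-2.internal.example:9092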
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background. +datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups. +datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note, that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and returns them incrementally from each poll. +datasources.section.source-kafka.polling_time.description=Amount of time Kafka connector should try to poll for messages. +datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received. +datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios. 
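+# Illustrative example only (placeholder credentials, assuming Kafka's standard PLAIN login module):
+# the sasl_jaas_config value described above follows JAAS syntax, e.g.
+# sasl_jaas_config=org.apache.kafka.common.security.plain.PlainLoginModule required username="my-user" password="my-password";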
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions.
+datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).
+datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read.
+datasources.section.source-kafka.test_topic.description=The topic used to test whether Airbyte can consume messages.
+datasources.section.source-klaviyo.api_key.title=Api Key
+datasources.section.source-klaviyo.start_date.title=Start Date
+datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key.
+datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-kustomer-singer.api_token.title=API Token
+datasources.section.source-kustomer-singer.start_date.title=Start Date
+datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this
+datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data
+datasources.section.source-kyriba.domain.title=Domain
+datasources.section.source-kyriba.end_date.title=End Date
+datasources.section.source-kyriba.password.title=Password
+datasources.section.source-kyriba.start_date.title=Start Date
+datasources.section.source-kyriba.username.title=Username
+datasources.section.source-kyriba.domain.description=Kyriba domain
+datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run to the current date.
+datasources.section.source-kyriba.password.description=Password to be used in basic auth
+datasources.section.source-kyriba.start_date.description=The date the sync should start from.
+datasources.section.source-kyriba.username.description=Username to be used in basic auth
+datasources.section.source-lemlist.api_key.title=API key
+datasources.section.source-lemlist.api_key.description=Lemlist API key.
+datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional)
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-ads.credentials.title=Authentication *
+datasources.section.source-linkedin-ads.start_date.title=Start date
+datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs separated by a space, to pull the data from. Leave empty, if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.
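+# Illustrative example only (made-up IDs): the LinkedIn Ads account_ids value described above is a
+# space-separated list, e.g. account_ids=123456789 987654321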
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token.
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours.
+datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-pages.credentials.title=Authentication *
+datasources.section.source-linkedin-pages.org_id.title=Organization ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID
+datasources.section.source-linnworks.application_id.title=Application ID.
+datasources.section.source-linnworks.application_secret.title=Application Secret
+datasources.section.source-linnworks.start_date.title=Start Date
+datasources.section.source-linnworks.token.title=API Token
+datasources.section.source-linnworks.application_id.description=Linnworks Application ID
+datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret
+datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-looker.client_id.title=Client ID
+datasources.section.source-looker.client_secret.title=Client Secret
+datasources.section.source-looker.domain.title=Domain
+datasources.section.source-looker.run_look_ids.title=Look IDs to Run
+datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key.
+datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key.
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com,looker.[clientname].com,IP address +datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional) +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key +datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key +datasources.section.source-mailchimp.credentials.title=Authentication * +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key. +datasources.section.source-mailgun.domain_region.title=Domain Region Code +datasources.section.source-mailgun.private_key.title=Private API Key +datasources.section.source-mailgun.start_date.title=Replication Start Date +datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'. +datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data. +datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago. +datasources.section.source-marketo.client_id.title=Client ID +datasources.section.source-marketo.client_secret.title=Client Secret +datasources.section.source-marketo.domain_url.title=Domain URL +datasources.section.source-marketo.start_date.title=Start Date +datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this. +datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0)
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft
+datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism
+datasources.section.source-microsoft-teams.period.title=Period
+datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.
+datasources.section.source-mixpanel.api_secret.title=Project Secret
+datasources.section.source-mixpanel.attribution_window.title=Attribution Window
+datasources.section.source-mixpanel.date_window_size.title=Date slicing window
+datasources.section.source-mixpanel.end_date.title=End Date
+datasources.section.source-mixpanel.project_timezone.title=Project Timezone
+datasources.section.source-mixpanel.region.title=Region
+datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default
+datasources.section.source-mixpanel.start_date.title=Start Date
+datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this.
+datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days.
+datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days that is used to slice through the data. You can reduce it if the amount of data in each window is too big for your environment.
+datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date.
+datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.
+datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU.
+datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.
+datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default.
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional)
+datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token
+datasources.section.source-monday.credentials.oneOf.1.title=API Token
+datasources.section.source-monday.credentials.title=Authorization Method
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests. +datasources.section.source-mongodb.auth_source.title=Authentication source +datasources.section.source-mongodb.database.title=Database name +datasources.section.source-mongodb.host.title=Host +datasources.section.source-mongodb.password.title=Password +datasources.section.source-mongodb.port.title=Port +datasources.section.source-mongodb.replica_set.title=Replica Set +datasources.section.source-mongodb.ssl.title=TLS connection +datasources.section.source-mongodb.user.title=User +datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info. +datasources.section.source-mongodb.database.description=Database to be replicated. +datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated. +datasources.section.source-mongodb.password.description=Password +datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated. +datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info. +datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB. +datasources.section.source-mongodb.user.description=User +datasources.section.source-mongodb-v2.auth_source.title=Authentication Source +datasources.section.source-mongodb-v2.database.title=Database Name +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses +datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type +datasources.section.source-mongodb-v2.password.title=Password +datasources.section.source-mongodb-v2.user.title=User +datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored. +datasources.section.source-mongodb-v2.database.description=The database you want to replicate. +datasources.section.source-mongodb-v2.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database. 
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by comma. +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to. +datasources.section.source-mongodb-v2.password.description=The password associated with this username. +datasources.section.source-mongodb-v2.user.description=The username which is used to access the database. +datasources.section.source-mssql.database.title=Database +datasources.section.source-mssql.host.title=Host +datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mssql.password.title=Password +datasources.section.source-mssql.port.title=Port +datasources.section.source-mssql.replication_method.oneOf.0.title=Standard +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level +datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mssql.replication_method.title=Replication Method +datasources.section.source-mssql.schemas.title=Schemas +datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.source-mssql.ssl_method.title=SSL Method +datasources.section.source-mssql.username.title=Username +datasources.section.source-mssql.database.description=The name of the database. +datasources.section.source-mssql.host.description=The hostname of the database. +datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mssql.password.description=The password associated with the username. +datasources.section.source-mssql.port.description=The port of the database. +datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC. +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database. +datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database. +datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.source-mssql.username.description=The username which is used to access the database. 
+datasources.section.source-my-hours.email.title=Email +datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size +datasources.section.source-my-hours.password.title=Password +datasources.section.source-my-hours.start_date.title=Start Date +datasources.section.source-my-hours.email.description=Your My Hours username +datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days +datasources.section.source-my-hours.password.description=The password associated to the username +datasources.section.source-my-hours.start_date.description=Start date for collecting time logs +datasources.section.source-mysql.database.title=Database +datasources.section.source-mysql.host.title=Host +datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mysql.password.title=Password +datasources.section.source-mysql.port.title=Port +datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD +datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mysql.replication_method.title=Replication Method +datasources.section.source-mysql.ssl.title=SSL Connection +datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred +datasources.section.source-mysql.ssl_mode.oneOf.1.title=required +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity +datasources.section.source-mysql.ssl_mode.title=SSL modes +datasources.section.source-mysql.username.title=Username +datasources.section.source-mysql.database.description=The database name. +datasources.section.source-mysql.host.description=The host name of the database. +datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mysql.password.description=The password associated with the username. +datasources.section.source-mysql.port.description=The port to connect to. +datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database. +datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL. +datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
  • preferred - Automatically attempt SSL connection. If the MySQL server does not support SSL, continue with a regular connection.
  • required - Always connect with SSL. If the MySQL server doesn’t support SSL, the connection will not be established. Certificate Authority (CA) and Hostname are not verified.
  • verify-ca - Always connect with SSL. Verifies CA, but allows connection even if Hostname does not match.
  • Verify Identity - Always connect with SSL. Verify both CA and Hostname.
  • Read more in the docs. +datasources.section.source-mysql.ssl_mode.oneOf.0.description=Preferred SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.1.description=Require SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.description=Verify CA SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not add it, the password will be generated automatically. +datasources.section.source-mysql.ssl_mode.oneOf.3.description=Verify-full SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not add it, the password will be generated automatically. +datasources.section.source-mysql.username.description=The username which is used to access the database. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-notion.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-notion.credentials.oneOf.1.properties.token.title=Access Token +datasources.section.source-notion.credentials.oneOf.1.title=Access Token +datasources.section.source-notion.credentials.title=Authenticate using +datasources.section.source-notion.start_date.title=Start Date +datasources.section.source-notion.credentials.description=Pick an authentication method. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.description=The Access Token you received by completing the OAuth web flow of Notion. +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.description=The Client ID of your Notion integration. +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Notion integration. +datasources.section.source-notion.credentials.oneOf.1.properties.token.description=Notion API access token. See the docs for more information on how to obtain this token. +datasources.section.source-notion.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-okta.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-okta.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-okta.credentials.oneOf.1.title=API Token +datasources.section.source-okta.credentials.title=Authorization Method * +datasources.section.source-okta.domain.title=Okta domain +datasources.section.source-okta.start_date.title=Start Date +datasources.section.source-okta.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain a new Access Token when it expires. +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.description=An Okta token. See the docs for instructions on how to generate it. +datasources.section.source-okta.domain.description=The Okta domain. See the docs for instructions on how to find it. +datasources.section.source-okta.start_date.description=UTC date and time in the format YYYY-MM-DDTHH:MM:SSZ. Any data before this date will not be replicated. +datasources.section.source-onesignal.outcome_names.title=Outcome Names +datasources.section.source-onesignal.start_date.title=Start Date +datasources.section.source-onesignal.user_auth_key.title=User Auth Key +datasources.section.source-onesignal.outcome_names.description=Comma-separated list of names and the value (sum/count) for the returned outcome data. See the docs for more details. +datasources.section.source-onesignal.start_date.description=The date from which you'd like to replicate data for OneSignal API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-onesignal.user_auth_key.description=OneSignal User Auth Key, see the docs for more information on how to obtain this key. +datasources.section.source-openweather.appid.title=App ID +datasources.section.source-openweather.lang.title=Language +datasources.section.source-openweather.lat.title=Latitude +datasources.section.source-openweather.lon.title=Longitude +datasources.section.source-openweather.units.title=Units +datasources.section.source-openweather.appid.description=Your OpenWeather API Key. See here. The key is case sensitive. +datasources.section.source-openweather.lang.description=You can use the lang parameter to get the output in your language. The contents of the description field will be translated. See here for the list of supported languages. +datasources.section.source-openweather.lat.description=Latitude for which you want to get the weather conditions (min -90, max 90). +datasources.section.source-openweather.lon.description=Longitude for which you want to get the weather conditions (min -180, max 180). +datasources.section.source-openweather.units.description=Units of measurement. standard, metric and imperial units are available. If you do not use the units parameter, standard units will be applied by default.
+datasources.section.source-oracle.connection_data.oneOf.0.properties.service_name.title=Service name +datasources.section.source-oracle.connection_data.oneOf.0.title=Service name +datasources.section.source-oracle.connection_data.oneOf.1.properties.sid.title=System ID (SID) +datasources.section.source-oracle.connection_data.oneOf.1.title=System ID (SID) +datasources.section.source-oracle.connection_data.title=Connect by +datasources.section.source-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.source-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM File +datasources.section.source-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.source-oracle.encryption.title=Encryption +datasources.section.source-oracle.host.title=Host +datasources.section.source-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.source-oracle.password.title=Password +datasources.section.source-oracle.port.title=Port +datasources.section.source-oracle.schemas.title=Schemas +datasources.section.source-oracle.username.title=User +datasources.section.source-oracle.connection_data.description=Connection data that will be used for the DB connection. +datasources.section.source-oracle.connection_data.oneOf.0.description=Use service name +datasources.section.source-oracle.connection_data.oneOf.1.description=Use SID (Oracle System Identifier) +datasources.section.source-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.source-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines what encryption algorithm is used. +datasources.section.source-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.source-oracle.host.description=Hostname of the database. +datasources.section.source-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-oracle.password.description=The password associated with the username. +datasources.section.source-oracle.port.description=Port of the database. +datasources.section.source-oracle.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-oracle.username.description=The username which is used to access the database.
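+# Editor's note (illustrative, not generated from a connector spec): the two "Connect by" options above correspond to the two standard Oracle thin-driver JDBC URL forms. The host, port, service name, and SID values below are hypothetical placeholders.
+#   Service name:   jdbc:oracle:thin:@//db.example.com:1521/ORCLPDB1
+#   System ID (SID): jdbc:oracle:thin:@db.example.com:1521:ORCL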
+datasources.section.source-orb.api_key.title=Orb API Key +datasources.section.source-orb.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-orb.numeric_event_properties_keys.title=Event properties keys (numeric values) +datasources.section.source-orb.start_date.title=Start Date +datasources.section.source-orb.string_event_properties_keys.title=Event properties keys (string values) +datasources.section.source-orb.api_key.description=Orb API Key, issued from the Orb admin console. +datasources.section.source-orb.lookback_window_days.description=When set to N, the connector will always refresh resources created within the past N days. By default, updated objects that are not newly created are not incrementally synced. +datasources.section.source-orb.numeric_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-orb.start_date.description=UTC date and time in the format 2022-03-01T00:00:00Z. Any data with created_at before this date will not be synced. +datasources.section.source-orb.string_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-outreach.client_id.title=Client ID +datasources.section.source-outreach.client_secret.title=Client Secret +datasources.section.source-outreach.redirect_uri.title=Redirect URI +datasources.section.source-outreach.refresh_token.title=Refresh Token +datasources.section.source-outreach.start_date.title=Start Date +datasources.section.source-outreach.client_id.description=The Client ID of your Outreach developer application. +datasources.section.source-outreach.client_secret.description=The Client Secret of your Outreach developer application. +datasources.section.source-outreach.redirect_uri.description=A Redirect URI is the location where the authorization server sends the user once the app has been successfully authorized and granted an authorization code or access token. +datasources.section.source-outreach.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-outreach.start_date.description=The date from which you'd like to replicate data for Outreach API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-pardot.client_id.description=The Consumer Key that can be found when viewing your app in Salesforce. +datasources.section.source-pardot.client_secret.description=The Consumer Secret that can be found when viewing your app in Salesforce. +datasources.section.source-pardot.is_sandbox.description=Whether or not the app is in a Salesforce sandbox. If you do not know what this is, assume it is false. +datasources.section.source-pardot.pardot_business_unit_id.description=Pardot Business ID, which can be found at Setup > Pardot > Pardot Account Setup. +datasources.section.source-pardot.refresh_token.description=Salesforce Refresh Token used for Airbyte to access your Salesforce account. If you don't know what this is, follow this guide to retrieve it. +datasources.section.source-pardot.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
Leave blank to skip this filter. +datasources.section.source-paypal-transaction.client_id.title=Client ID +datasources.section.source-paypal-transaction.client_secret.title=Client secret +datasources.section.source-paypal-transaction.is_sandbox.title=Sandbox +datasources.section.source-paypal-transaction.refresh_token.title=Refresh token (Optional) +datasources.section.source-paypal-transaction.start_date.title=Start Date +datasources.section.source-paypal-transaction.client_id.description=The Client ID of your Paypal developer application. +datasources.section.source-paypal-transaction.client_secret.description=The Client Secret of your Paypal developer application. +datasources.section.source-paypal-transaction.is_sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-paypal-transaction.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-paypal-transaction.start_date.description=Start Date for data extraction in ISO format. The date must be within the range from 3 years ago up to 12 hours before the present time. +datasources.section.source-paystack.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-paystack.secret_key.title=Secret Key +datasources.section.source-paystack.start_date.title=Start Date +datasources.section.source-paystack.lookback_window_days.description=When set, the connector will always reload data from the past N days, where N is the value set here. This is useful if your data is updated after creation. +datasources.section.source-paystack.secret_key.description=The Paystack API key (usually starts with 'sk_live_'; find yours here). +datasources.section.source-paystack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-persistiq.api_key.description=PersistIq API Key. See the docs for more information on where to find that key. +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pinterest.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-pinterest.credentials.oneOf.1.title=Access Token +datasources.section.source-pinterest.credentials.title=Authorization Method +datasources.section.source-pinterest.start_date.title=Start Date +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain a new Access Token when it expires. +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests. +datasources.section.source-pinterest.start_date.description=A date in the format YYYY-MM-DD. If you have not set a date, it will default to 2020-07-28.
+datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pipedrive.authorization.oneOf.0.title=Sign in via Pipedrive (OAuth) +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.title=API Token +datasources.section.source-pipedrive.authorization.oneOf.1.title=API Key Authentication +datasources.section.source-pipedrive.authorization.title=Authentication Type +datasources.section.source-pipedrive.replication_start_date.title=Start Date +datasources.section.source-pipedrive.authorization.description=Choose one of the possible authorization methods. +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.description=The Client ID of your Pipedrive developer application. +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.description=The Client Secret of your Pipedrive developer application. +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.description=The Pipedrive API Token. +datasources.section.source-pipedrive.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. When specified and not None, the stream will behave as incremental. +datasources.section.source-pivotal-tracker.api_token.description=Pivotal Tracker API token. +datasources.section.source-plaid.access_token.title=Access Token +datasources.section.source-plaid.api_key.title=API Key +datasources.section.source-plaid.client_id.title=Client ID +datasources.section.source-plaid.plaid_env.title=Plaid Environment +datasources.section.source-plaid.start_date.title=Start Date +datasources.section.source-plaid.access_token.description=The end-user's Link access token. +datasources.section.source-plaid.api_key.description=The Plaid API key to use to hit the API. +datasources.section.source-plaid.client_id.description=The Plaid client ID. +datasources.section.source-plaid.plaid_env.description=The Plaid environment. +datasources.section.source-plaid.start_date.description=The date from which you'd like to replicate data for Plaid in the format YYYY-MM-DD. All data generated after this date will be replicated. +datasources.section.source-pokeapi.pokemon_name.title=Pokemon Name +datasources.section.source-pokeapi.pokemon_name.description=Pokemon requested from the API.
+datasources.section.source-postgres.database.title=Database Name +datasources.section.source-postgres.host.title=Host +datasources.section.source-postgres.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-postgres.password.title=Password +datasources.section.source-postgres.port.title=Port +datasources.section.source-postgres.replication_method.oneOf.0.title=Standard +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.title=Initial Waiting Time in Seconds (Advanced) +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.title=Plugin +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.title=Publication +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.title=Replication Slot +datasources.section.source-postgres.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-postgres.replication_method.title=Replication Method +datasources.section.source-postgres.schemas.title=Schemas +datasources.section.source-postgres.ssl.title=Connect using SSL +datasources.section.source-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.source-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.source-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.source-postgres.ssl_mode.oneOf.3.title=require +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA Certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.source-postgres.ssl_mode.title=SSL Modes +datasources.section.source-postgres.username.title=Username +datasources.section.source-postgres.database.description=Name of the database. +datasources.section.source-postgres.host.description=Hostname of the database. +datasources.section.source-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-postgres.password.description=Password associated with the username. +datasources.section.source-postgres.port.description=Port of the database. +datasources.section.source-postgres.replication_method.description=Replication method for extracting data from the database. +datasources.section.source-postgres.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. 
+datasources.section.source-postgres.replication_method.oneOf.1.description=Logical replication uses the Postgres write-ahead log (WAL) to detect inserts, updates, and deletes. This needs to be configured on the source database itself. Only available on Postgres 10 and above. Read the docs. +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.description=The amount of time the connector will wait when it launches to determine if there is new data to sync or not. Defaults to 300 seconds. Valid range: 120 seconds to 1200 seconds. Read about initial waiting time. +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.description=A logical decoding plugin installed on the PostgreSQL server. The `pgoutput` plugin is used by default. If the replication table contains a lot of big jsonb values it is recommended to use `wal2json` plugin. Read more about selecting replication plugins. +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.description=A Postgres publication used for consuming changes. Read about publications and replication identities. +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.description=A plugin logical replication slot. Read about replication slots. +datasources.section.source-postgres.schemas.description=The list of schemas (case sensitive) to sync from. Defaults to public. +datasources.section.source-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.source-postgres.ssl_mode.description=SSL connection modes. +datasources.section.source-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.source-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. If you do not add it - the password will be generated automatically. +datasources.section.source-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. If you do not add it - the password will be generated automatically. +datasources.section.source-postgres.username.description=Username to access the database. +datasources.section.source-posthog.api_key.title=API Key +datasources.section.source-posthog.base_url.title=Base URL +datasources.section.source-posthog.start_date.title=Start Date +datasources.section.source-posthog.api_key.description=API Key. 
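+# Editor's note (illustrative, not generated from a connector spec): the Postgres CDC fields above (plugin, publication, replication slot) refer to objects that must already exist on the source database. Assuming the default pgoutput plugin, they could be created with statements like the following; the names airbyte_slot and airbyte_publication are hypothetical.
+#   SELECT pg_create_logical_replication_slot('airbyte_slot', 'pgoutput');
+#   CREATE PUBLICATION airbyte_publication FOR ALL TABLES;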
See the docs for information on how to generate this key. +datasources.section.source-posthog.base_url.description=Base PostHog url. Defaults to PostHog Cloud (https://app.posthog.com). +datasources.section.source-posthog.start_date.description=The date from which you'd like to replicate the data. Any data before this date will not be replicated. +datasources.section.source-prestashop.access_key.description=Your PrestaShop access key. See the docs for info on how to obtain this. +datasources.section.source-prestashop.url.description=Shop URL without trailing slash (domain name or IP address) +datasources.section.source-qualaroo.key.title=API key +datasources.section.source-qualaroo.start_date.title=Start Date +datasources.section.source-qualaroo.survey_ids.title=Qualaroo survey IDs +datasources.section.source-qualaroo.token.title=API token +datasources.section.source-qualaroo.key.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-qualaroo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-qualaroo.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated. +datasources.section.source-qualaroo.token.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-quickbooks-singer.client_id.title=Client ID +datasources.section.source-quickbooks-singer.client_secret.title=Client Secret +datasources.section.source-quickbooks-singer.realm_id.title=Realm ID +datasources.section.source-quickbooks-singer.refresh_token.title=Refresh Token +datasources.section.source-quickbooks-singer.sandbox.title=Sandbox +datasources.section.source-quickbooks-singer.start_date.title=Start Date +datasources.section.source-quickbooks-singer.user_agent.title=User Agent +datasources.section.source-quickbooks-singer.client_id.description=Identifies which app is making the request. Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.client_secret.description= Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.realm_id.description=Labeled Company ID. The Make API Calls panel is populated with the realm id and the current access token. +datasources.section.source-quickbooks-singer.refresh_token.description=A token used when refreshing the access token. +datasources.section.source-quickbooks-singer.sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-quickbooks-singer.start_date.description=The default value to use if no bookmark exists for an endpoint (rfc3339 date string). E.g, 2021-03-20T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-quickbooks-singer.user_agent.description=Process and email for API logging purposes. Example: tap-quickbooks . +datasources.section.source-recharge.access_token.title=Access Token +datasources.section.source-recharge.start_date.title=Start Date +datasources.section.source-recharge.access_token.description=The value of the Access Token generated. See the docs for more information. 
+datasources.section.source-recharge.start_date.description=The date from which you'd like to replicate data for Recharge API, in the format YYYY-MM-DDT00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-recurly.api_key.title=API Key +datasources.section.source-recurly.api_key.description=Recurly API Key. See the docs for more information on how to generate this key. +datasources.section.source-recurly.begin_time.description=ISO8601 timestamp from which the replication from the Recurly API will start. +datasources.section.source-recurly.end_time.description=ISO8601 timestamp at which the replication from the Recurly API will stop. Records after that date won't be imported. +datasources.section.source-redshift.database.title=Database +datasources.section.source-redshift.host.title=Host +datasources.section.source-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.source-redshift.password.title=Password +datasources.section.source-redshift.port.title=Port +datasources.section.source-redshift.schemas.title=Schemas +datasources.section.source-redshift.username.title=Username +datasources.section.source-redshift.database.description=Name of the database. +datasources.section.source-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com). +datasources.section.source-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-redshift.password.description=Password associated with the username. +datasources.section.source-redshift.port.description=Port of the database. +datasources.section.source-redshift.schemas.description=The list of schemas to sync from. Specify one or more explicitly or keep empty to process all schemas. Schema names are case sensitive. +datasources.section.source-redshift.username.description=Username to use to access the database. +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-retently.credentials.oneOf.0.title=Authenticate via Retently (OAuth) +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.title=API Token +datasources.section.source-retently.credentials.oneOf.1.title=Authenticate with API Token +datasources.section.source-retently.credentials.title=Authentication Mechanism +datasources.section.source-retently.credentials.description=Choose how to authenticate to Retently. +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.description=The Client ID of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.description=Retently Refresh Token which can be used to fetch new Bearer Tokens when the current one expires. +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.description=Retently API Token. See the docs for more information on how to obtain this key.
+datasources.section.source-rki-covid.start_date.title=Start Date +datasources.section.source-rki-covid.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-s3.dataset.title=Output Stream Name +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.title=Additional Reader Options +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.title=Advanced Options +datasources.section.source-s3.format.oneOf.0.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.0.properties.delimiter.title=Delimiter +datasources.section.source-s3.format.oneOf.0.properties.double_quote.title=Double Quote +datasources.section.source-s3.format.oneOf.0.properties.encoding.title=Encoding +datasources.section.source-s3.format.oneOf.0.properties.escape_char.title=Escape Character +datasources.section.source-s3.format.oneOf.0.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.title=Infer Datatypes +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.0.properties.quote_char.title=Quote Character +datasources.section.source-s3.format.oneOf.0.title=CSV +datasources.section.source-s3.format.oneOf.1.properties.batch_size.title=Record batch size +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.title=Buffer Size +datasources.section.source-s3.format.oneOf.1.properties.columns.title=Selected Columns +datasources.section.source-s3.format.oneOf.1.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.1.title=Parquet +datasources.section.source-s3.format.oneOf.2.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.2.title=Avro +datasources.section.source-s3.format.oneOf.3.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.3.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.title=UnexpectedFieldBehaviorEnum +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.title=Unexpected field behavior +datasources.section.source-s3.format.oneOf.3.title=Jsonl +datasources.section.source-s3.format.title=File Format +datasources.section.source-s3.path_pattern.title=Pattern of files to replicate +datasources.section.source-s3.provider.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-s3.provider.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-s3.provider.properties.bucket.title=Bucket +datasources.section.source-s3.provider.properties.endpoint.title=Endpoint +datasources.section.source-s3.provider.properties.path_prefix.title=Path Prefix +datasources.section.source-s3.provider.properties.use_ssl.title=Use TLS +datasources.section.source-s3.provider.properties.verify_ssl_cert.title=Verify TLS Certificates +datasources.section.source-s3.provider.title=S3: Amazon Web Services +datasources.section.source-s3.schema.title=Manually enforced data schema (Optional) +datasources.section.source-s3.dataset.description=The name of the stream you would like this source to output. Can contain letters, numbers, or underscores. 
+datasources.section.source-s3.format.description=The format of the files you'd like to replicate. +datasources.section.source-s3.format.oneOf.0.description=This connector utilises PyArrow (Apache Arrow) for CSV parsing. +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.description=Optionally add a valid JSON string here to provide additional options to the csv reader. Mappings must correspond to options detailed here. 'column_types' is used internally to handle schema, so overriding that would likely cause problems. +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.description=Optionally add a valid JSON string here to provide additional Pyarrow ReadOptions. Specify 'column_names' here if your CSV doesn't have a header, or if you want to use custom column names. 'block_size' and 'encoding' are already used above; specifying them again here will override the values above. +datasources.section.source-s3.format.oneOf.0.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.0.properties.delimiter.description=The character delimiting individual cells in the CSV data. This may only be a 1-character string. For tab-delimited data enter '\t'. +datasources.section.source-s3.format.oneOf.0.properties.double_quote.description=Whether two quotes in a quoted CSV value denote a single quote in the data. +datasources.section.source-s3.format.oneOf.0.properties.encoding.description=The character encoding of the CSV data. Leave blank to default to UTF8. See list of python encodings for allowable options. +datasources.section.source-s3.format.oneOf.0.properties.escape_char.description=The character used for escaping special characters. To disallow escaping, leave this field blank. +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.description=Configures whether a schema for the source should be inferred from the current data or not. If set to false and a custom schema is set, then the manually enforced schema is used. If a schema is not manually set, and this is set to false, then all fields will be read as strings. +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.description=Whether newline characters are allowed in CSV values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.0.properties.quote_char.description=The character used for quoting CSV values. To disallow quoting, make this field blank. +datasources.section.source-s3.format.oneOf.1.description=This connector utilises PyArrow (Apache Arrow) for Parquet parsing. +datasources.section.source-s3.format.oneOf.1.properties.batch_size.description=Maximum number of records per batch read from the input files. Batches may be smaller if there aren’t enough rows in the file. This option can help avoid out-of-memory errors if your data is particularly wide. +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.description=Perform read buffering when deserializing individual column chunks. By default, every group column will be loaded fully into memory. This option can help avoid out-of-memory errors if your data is particularly wide.
+datasources.section.source-s3.format.oneOf.1.properties.columns.description=If you only want to sync a subset of the columns from the file(s), add the columns you want here as a comma-delimited list. Leave it empty to sync all columns. +datasources.section.source-s3.format.oneOf.2.description=This connector utilises fastavro for Avro parsing. +datasources.section.source-s3.format.oneOf.3.description=This connector uses PyArrow for JSON Lines (jsonl) file parsing. +datasources.section.source-s3.format.oneOf.3.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.description=Whether newline characters are allowed in JSON values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.description=An enumeration. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.description=How JSON fields outside of explicit_schema (if given) are treated. Check PyArrow documentation for details +datasources.section.source-s3.path_pattern.description=A regular expression which tells the connector which files to replicate. All files which match this pattern will be replicated. Use | to separate multiple patterns. See this page to understand pattern syntax (GLOBSTAR and SPLIT flags are enabled). Use pattern ** to pick up all files. +datasources.section.source-s3.provider.description=Use this to load files from S3 or S3-compatible services +datasources.section.source-s3.provider.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.bucket.description=Name of the S3 bucket where the file(s) exist. +datasources.section.source-s3.provider.properties.endpoint.description=Endpoint to an S3 compatible service. Leave empty to use AWS. +datasources.section.source-s3.provider.properties.path_prefix.description=By providing a path-like prefix (e.g. myFolder/thisTable/) under which all the relevant files sit, we can optimize finding these in S3. This is optional but recommended if your bucket contains many folders/files which you don't need to replicate. +datasources.section.source-s3.provider.properties.use_ssl.description=Whether the remote server is using a secure SSL/TLS connection. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.provider.properties.verify_ssl_cert.description=Set this to false to allow self signed certificates. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.schema.description=Optionally provide a schema to enforce, as a valid JSON string. Ensure this is a mapping of { "column" : "type" }, where types are valid JSON Schema datatypes. Leave as {} to auto-infer the schema. 
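+# Editor's note (illustrative, not generated from a connector spec): a hypothetical example of the two free-form S3 fields described above. path_pattern uses glob syntax, and schema is a JSON mapping of column name to JSON Schema type; the values shown are placeholders.
+#   path_pattern: csv/**/*.csv
+#   schema:       { "id": "integer", "created_at": "string" }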
+datasources.section.source-salesloft.client_id.title=Client ID +datasources.section.source-salesloft.client_secret.title=Client Secret +datasources.section.source-salesloft.refresh_token.title=Refresh Token +datasources.section.source-salesloft.start_date.title=Start Date +datasources.section.source-salesloft.client_id.description=The Client ID of your Salesloft developer application. +datasources.section.source-salesloft.client_secret.description=The Client Secret of your Salesloft developer application. +datasources.section.source-salesloft.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-salesloft.start_date.description=The date from which you'd like to replicate data for Salesloft API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-search-metrics.api_key.title=API Key +datasources.section.source-search-metrics.client_secret.title=Client Secret +datasources.section.source-search-metrics.country_code.title=Country Code +datasources.section.source-search-metrics.start_date.title=Start Date +datasources.section.source-search-metrics.api_key.description= +datasources.section.source-search-metrics.client_secret.description= +datasources.section.source-search-metrics.country_code.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.source-search-metrics.start_date.description=Data generated in SearchMetrics after this date will be replicated. This date must be specified in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-sendgrid.apikey.title=Sendgrid API key +datasources.section.source-sendgrid.start_time.title=Start time +datasources.section.source-sendgrid.apikey.description=API Key, use admin to generate this key. +datasources.section.source-sendgrid.start_time.description=Start time in timestamp integer format. Any data before this timestamp will not be replicated. +datasources.section.source-sentry.auth_token.title=Authentication Tokens +datasources.section.source-sentry.hostname.title=Host Name +datasources.section.source-sentry.organization.title=Organization +datasources.section.source-sentry.project.title=Project +datasources.section.source-sentry.auth_token.description=Log into Sentry and then create authentication tokens. For self-hosted, you can find or create authentication tokens by visiting "{instance_url_prefix}/settings/account/api/auth-tokens/" +datasources.section.source-sentry.hostname.description=Host name of Sentry API server. For self-hosted, specify your host name here. Otherwise, leave it empty. +datasources.section.source-sentry.organization.description=The slug of the organization the groups belong to. +datasources.section.source-sentry.project.description=The name (slug) of the Project you want to sync.
+datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.title=Password +datasources.section.source-sftp.credentials.oneOf.0.title=Password Authentication +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.title=SSH Private Key +datasources.section.source-sftp.credentials.oneOf.1.title=SSH Key Authentication +datasources.section.source-sftp.credentials.title=Authentication * +datasources.section.source-sftp.file_pattern.title=File Pattern (Optional) +datasources.section.source-sftp.file_types.title=File types +datasources.section.source-sftp.folder_path.title=Folder Path (Optional) +datasources.section.source-sftp.host.title=Host Address +datasources.section.source-sftp.port.title=Port +datasources.section.source-sftp.user.title=User Name +datasources.section.source-sftp.credentials.description=The server authentication method +datasources.section.source-sftp.credentials.oneOf.0.properties.auth_method.description=Connect through password authentication +datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.description=OS-level password for logging into the jump server host +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_method.description=Connect through SSH key +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.description=OS-level user account SSH key credentials in RSA PEM format (created with ssh-keygen -t rsa -m PEM -f myuser_rsa) +datasources.section.source-sftp.file_pattern.description=The regular expression to specify files for sync in a chosen Folder Path +datasources.section.source-sftp.file_types.description=Comma-separated file types. Currently only 'csv' and 'json' types are supported. +datasources.section.source-sftp.folder_path.description=The directory to search files for sync +datasources.section.source-sftp.host.description=The server host address +datasources.section.source-sftp.port.description=The server port +datasources.section.source-sftp.user.description=The server user +datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-shopify.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.title=API Password +datasources.section.source-shopify.credentials.oneOf.1.title=API Password +datasources.section.source-shopify.credentials.title=Shopify Authorization Method +datasources.section.source-shopify.shop.title=Shopify Store +datasources.section.source-shopify.start_date.title=Replication Start Date +datasources.section.source-shopify.credentials.description=The authorization method to use to retrieve data from Shopify +datasources.section.source-shopify.credentials.oneOf.0.description=OAuth2.0 +datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.description=The Access Token for making authenticated requests. +datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.description=The Client ID of the Shopify developer application. +datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the Shopify developer application.
+datasources.section.source-shopify.credentials.oneOf.1.description=API Password Auth +datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.description=The API Password for your private application in the `Shopify` store. +datasources.section.source-shopify.shop.description=The name of your Shopify store found in the URL. For example, if your URL was https://NAME.myshopify.com, then the name would be 'NAME'. +datasources.section.source-shopify.start_date.description=The date you would like to replicate data from. Format: YYYY-MM-DD. Any data before this date will not be replicated. +datasources.section.source-shortio.domain_id.title=Domain ID +datasources.section.source-shortio.secret_key.title=Secret Key +datasources.section.source-shortio.start_date.title=Start Date +datasources.section.source-shortio.secret_key.description=Short.io Secret Key +datasources.section.source-shortio.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-slack.channel_filter.title=Channel name filter +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.title=Access token +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-slack.credentials.oneOf.0.title=Sign in via Slack (OAuth) +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-slack.credentials.oneOf.1.title=API Token +datasources.section.source-slack.credentials.title=Authentication mechanism +datasources.section.source-slack.join_channels.title=Join all channels +datasources.section.source-slack.lookback_window.title=Threads Lookback window (Days) +datasources.section.source-slack.start_date.title=Start Date +datasources.section.source-slack.channel_filter.description=A channel name list (without leading '#' char) which limits the channels from which you'd like to sync. Empty list means no filter. +datasources.section.source-slack.credentials.description=Choose how to authenticate into Slack +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.description=Slack access_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.description=Slack client_id. See our docs if you need help finding this id. +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.description=Slack client_secret. See our docs if you need help finding this secret. +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.description=Slack refresh_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.description=A Slack bot token. See the docs for instructions on how to generate it. +datasources.section.source-slack.join_channels.description=Whether to join all channels or to sync data only from channels the bot is already in. If false, you'll need to manually add the bot to all the channels from which you'd like to sync messages. +datasources.section.source-slack.lookback_window.description=How far into the past to look for messages in threads.
+datasources.section.source-slack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-smartsheets.access_token.title=Access Token +datasources.section.source-smartsheets.spreadsheet_id.title=Sheet ID +datasources.section.source-smartsheets.start_datetime.title=Start Datetime (Optional) +datasources.section.source-smartsheets.access_token.description=The access token to use for accessing your data from Smartsheets. This access token must be generated by a user with at least read access to the data you'd like to replicate. Generate an access token in the Smartsheets main menu by clicking Account > Apps & Integrations > API Access. See the setup guide for information on how to obtain this token. +datasources.section.source-smartsheets.spreadsheet_id.description=The spreadsheet ID. Find it by opening the spreadsheet then navigating to File > Properties +datasources.section.source-smartsheets.start_datetime.description=Only rows modified after this date/time will be replicated. This should be an ISO 8601 string, for instance: `2000-01-01T13:00:00` +datasources.section.source-snapchat-marketing.client_id.title=Client ID +datasources.section.source-snapchat-marketing.client_secret.title=Client Secret +datasources.section.source-snapchat-marketing.end_date.title=End Date (Optional) +datasources.section.source-snapchat-marketing.refresh_token.title=Refresh Token +datasources.section.source-snapchat-marketing.start_date.title=Start Date +datasources.section.source-snapchat-marketing.client_id.description=The Client ID of your Snapchat developer application. +datasources.section.source-snapchat-marketing.client_secret.description=The Client Secret of your Snapchat developer application. +datasources.section.source-snapchat-marketing.end_date.description=Date in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-snapchat-marketing.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-snapchat-marketing.start_date.description=Date in the format 2022-01-01. Any data before this date will not be replicated. 
+datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.title=Password +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.title=Username +datasources.section.source-snowflake.credentials.oneOf.1.title=Username and Password +datasources.section.source-snowflake.credentials.title=Authorization Method +datasources.section.source-snowflake.database.title=Database +datasources.section.source-snowflake.host.title=Account Name +datasources.section.source-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.source-snowflake.role.title=Role +datasources.section.source-snowflake.schema.title=Schema +datasources.section.source-snowflake.warehouse.title=Warehouse +datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.description=The Client ID of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.description=Refresh Token for making authenticated requests. +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.description=The password associated with the username. +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.description=The username you created to allow Airbyte to access the database. +datasources.section.source-snowflake.database.description=The database you created for Airbyte to access data. +datasources.section.source-snowflake.host.description=The host domain of the snowflake instance (must include the account, region, cloud environment, and end with snowflakecomputing.com). +datasources.section.source-snowflake.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-snowflake.role.description=The role you created for Airbyte to access Snowflake. +datasources.section.source-snowflake.schema.description=The source Snowflake schema tables. +datasources.section.source-snowflake.warehouse.description=The warehouse you created for Airbyte to access data. 
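+# Hypothetical example values for the source-snowflake fields above (placeholders shown only to
+# clarify the expected formats; none of these come from a real account):
+#   host: ACCOUNT_NAME.us-east-2.aws.snowflakecomputing.com   (account, region and cloud environment, ending in snowflakecomputing.com)
+#   warehouse: AIRBYTE_WAREHOUSE   database: AIRBYTE_DATABASE   schema: AIRBYTE_SCHEMA   role: AIRBYTE_ROLE
+#   jdbc_url_params: key1=value1&key2=value2&key3=value3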
+datasources.section.source-square.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-square.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-square.credentials.oneOf.0.title=OAuth authentication +datasources.section.source-square.credentials.oneOf.1.properties.api_key.title=API key token +datasources.section.source-square.credentials.oneOf.1.title=API Key +datasources.section.source-square.credentials.title=Credential Type +datasources.section.source-square.include_deleted_objects.title=Include Deleted Objects +datasources.section.source-square.is_sandbox.title=Sandbox +datasources.section.source-square.start_date.title=Start Date +datasources.section.source-square.credentials.oneOf.0.properties.client_id.description=The Square-issued ID of your application +datasources.section.source-square.credentials.oneOf.0.properties.client_secret.description=The Square-issued application secret for your application +datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.description=A refresh token generated using the above client ID and secret +datasources.section.source-square.credentials.oneOf.1.properties.api_key.description=The API key for a Square application +datasources.section.source-square.include_deleted_objects.description=In some streams there is an option to include deleted objects (Items, Categories, Discounts, Taxes) +datasources.section.source-square.is_sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-square.start_date.description=UTC date in the format YYYY-MM-DD. Any data before this date will not be replicated. If not set, all data will be replicated. +datasources.section.source-strava.athlete_id.title=Athlete ID +datasources.section.source-strava.client_id.title=Client ID +datasources.section.source-strava.client_secret.title=Client Secret +datasources.section.source-strava.refresh_token.title=Refresh Token +datasources.section.source-strava.start_date.title=Start Date +datasources.section.source-strava.athlete_id.description=The Athlete ID of your Strava developer application. +datasources.section.source-strava.client_id.description=The Client ID of your Strava developer application. +datasources.section.source-strava.client_secret.description=The Client Secret of your Strava developer application. +datasources.section.source-strava.refresh_token.description=The Refresh Token with the activity: read_all permissions. +datasources.section.source-strava.start_date.description=UTC date and time. Any data before this date will not be replicated. +datasources.section.source-surveymonkey.access_token.title=Access Token +datasources.section.source-surveymonkey.start_date.title=Start Date +datasources.section.source-surveymonkey.survey_ids.title=Survey Monkey survey IDs +datasources.section.source-surveymonkey.access_token.description=Access Token for making authenticated requests. See the docs for information on how to generate this key. +datasources.section.source-surveymonkey.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-surveymonkey.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated.
+datasources.section.source-talkdesk-explore.api_key.title=API KEY +datasources.section.source-talkdesk-explore.auth_url.title=AUTH URL +datasources.section.source-talkdesk-explore.start_date.title=START DATE +datasources.section.source-talkdesk-explore.timezone.title=TIMEZONE +datasources.section.source-talkdesk-explore.api_key.description=Talkdesk API key. +datasources.section.source-talkdesk-explore.auth_url.description=Talkdesk Auth URL. Only 'client_credentials' auth type supported at the moment. +datasources.section.source-talkdesk-explore.start_date.description=The date from which you'd like to replicate data for Talkdesk Explore API, in the format YYYY-MM-DDT00:00:00. All data generated after this date will be replicated. +datasources.section.source-talkdesk-explore.timezone.description=Timezone to use when generating reports. Only IANA timezones are supported (https://nodatime.org/TimeZones) +datasources.section.source-tempo.api_token.title=API token +datasources.section.source-tempo.api_token.description=Tempo API Token. Go to Tempo>Settings, scroll down to Data Access and select API integration. +datasources.section.source-tidb.database.title=Database +datasources.section.source-tidb.host.title=Host +datasources.section.source-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.source-tidb.password.title=Password +datasources.section.source-tidb.port.title=Port +datasources.section.source-tidb.ssl.title=SSL Connection +datasources.section.source-tidb.username.title=Username +datasources.section.source-tidb.database.description=Name of the database. +datasources.section.source-tidb.host.description=Hostname of the database. +datasources.section.source-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3) +datasources.section.source-tidb.password.description=Password associated with the username. +datasources.section.source-tidb.port.description=Port of the database. +datasources.section.source-tidb.ssl.description=Encrypt data using SSL. +datasources.section.source-tidb.username.description=Username to use to access the database. +datasources.section.source-timely.account_id.title=account_id +datasources.section.source-timely.bearer_token.title=Bearer token +datasources.section.source-timely.start_date.title=startDate +datasources.section.source-timely.account_id.description=Timely account id +datasources.section.source-timely.bearer_token.description=Timely bearer token +datasources.section.source-timely.start_date.description=start date +datasources.section.source-tplcentral.client_id.title=Client ID +datasources.section.source-tplcentral.client_secret.title=Client secret +datasources.section.source-tplcentral.customer_id.title=Customer ID +datasources.section.source-tplcentral.facility_id.title=Facility ID +datasources.section.source-tplcentral.start_date.title=Start date +datasources.section.source-tplcentral.tpl_key.title=3PL GUID +datasources.section.source-tplcentral.url_base.title=URL base +datasources.section.source-tplcentral.user_login.title=User login name +datasources.section.source-tplcentral.user_login_id.title=User login ID +datasources.section.source-tplcentral.start_date.description=Date and time together in RFC 3339 format, for example, 2018-11-13T20:20:39+00:00. 
+datasources.section.source-tplcentral.user_login.description=User login ID and/or name is required +datasources.section.source-tplcentral.user_login_id.description=User login ID and/or name is required +datasources.section.source-trello.board_ids.title=Trello Board IDs +datasources.section.source-trello.key.title=API key +datasources.section.source-trello.start_date.title=Start Date +datasources.section.source-trello.token.title=API token +datasources.section.source-trello.board_ids.description=IDs of the boards to replicate data from. If left empty, data from all boards to which you have access will be replicated. +datasources.section.source-trello.key.description=Trello API key. See the docs for instructions on how to generate it. +datasources.section.source-trello.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-trello.token.description=Trello API token. See the docs for instructions on how to generate it. +datasources.section.source-twilio.account_sid.title=Account ID +datasources.section.source-twilio.auth_token.title=Auth Token +datasources.section.source-twilio.lookback_window.title=Lookback window +datasources.section.source-twilio.start_date.title=Replication Start Date +datasources.section.source-twilio.account_sid.description=Twilio account SID +datasources.section.source-twilio.auth_token.description=Twilio Auth Token. +datasources.section.source-twilio.lookback_window.description=How far into the past to look for records (in minutes). +datasources.section.source-twilio.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-typeform.form_ids.title=Form IDs to replicate +datasources.section.source-typeform.start_date.title=Start Date +datasources.section.source-typeform.token.title=API Token +datasources.section.source-typeform.form_ids.description=When this parameter is set, the connector will replicate data only from the input forms. Otherwise, all forms in your Typeform account will be replicated. You can find form IDs in your form URLs. For example, in the URL "https://mysite.typeform.com/to/u6nXL7" the form_id is u6nXL7. You can find form URLs on the Share panel +datasources.section.source-typeform.start_date.description=UTC date and time in the format: YYYY-MM-DDTHH:mm:ss[Z]. Any data before this date will not be replicated. +datasources.section.source-typeform.token.description=The API Token for a Typeform account. +datasources.section.source-us-census.api_key.description=Your API Key. Get your key here. +datasources.section.source-us-census.query_params.description=The query parameters portion of the GET request, without the API key +datasources.section.source-us-census.query_path.description=The path portion of the GET request +datasources.section.source-woocommerce.conversion_window_days.title=Conversion Window (Optional) +datasources.section.source-woocommerce.api_key.description=The CUSTOMER KEY for API in WooCommerce shop. +datasources.section.source-woocommerce.api_secret.description=The CUSTOMER SECRET for API in WooCommerce shop. +datasources.section.source-woocommerce.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. +datasources.section.source-woocommerce.shop.description=The name of the store.
For https://EXAMPLE.com, the shop name is 'EXAMPLE.com'. +datasources.section.source-woocommerce.start_date.description=The date you would like to replicate data from. Format: YYYY-MM-DD. +datasources.section.source-yahoo-finance-price.interval.title=Interval +datasources.section.source-yahoo-finance-price.range.title=Range +datasources.section.source-yahoo-finance-price.interval.description=The interval between prices queried. +datasources.section.source-yahoo-finance-price.range.description=The range of prices to be queried. +datasources.section.source-yahoo-finance-price.tickers.description=Comma-separated identifiers for the stocks to be queried. Whitespaces are allowed. +datasources.section.source-youtube-analytics.credentials.properties.client_id.title=Client ID +datasources.section.source-youtube-analytics.credentials.properties.client_secret.title=Client Secret +datasources.section.source-youtube-analytics.credentials.properties.refresh_token.title=Refresh Token +datasources.section.source-youtube-analytics.credentials.title=Authenticate via OAuth 2.0 +datasources.section.source-youtube-analytics.credentials.properties.client_id.description=The Client ID of your developer application +datasources.section.source-youtube-analytics.credentials.properties.client_secret.description=The client secret of your developer application +datasources.section.source-youtube-analytics.credentials.properties.refresh_token.description=A refresh token generated using the above client ID and secret +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-zendesk-chat.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-zendesk-chat.credentials.oneOf.1.title=Access Token +datasources.section.source-zendesk-chat.credentials.title=Authorization Method +datasources.section.source-zendesk-chat.start_date.title=Start Date +datasources.section.source-zendesk-chat.subdomain.title=Subdomain (Optional) +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain a new Access Token when it expires. +datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests. +datasources.section.source-zendesk-chat.start_date.description=The date from which you'd like to replicate data for Zendesk Chat API, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-zendesk-chat.subdomain.description=Required if you access Zendesk Chat from a Zendesk Support subdomain.
+datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-sunshine.credentials.title=Authorization Method +datasources.section.source-zendesk-sunshine.start_date.title=Start Date +datasources.section.source-zendesk-sunshine.subdomain.title=Subdomain +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.description=Long-term access Token for making authenticated requests. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.description=API Token. See the docs for information on how to generate this key. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account +datasources.section.source-zendesk-sunshine.start_date.description=The date from which you'd like to replicate data for Zendesk Sunshine API, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-zendesk-sunshine.subdomain.description=The subdomain for your Zendesk Account. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-support.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-support.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-support.credentials.title=Authentication * +datasources.section.source-zendesk-support.start_date.title=Start Date +datasources.section.source-zendesk-support.subdomain.title=Subdomain +datasources.section.source-zendesk-support.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-support.start_date.description=The date from which you'd like to replicate data for Zendesk Support API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. 
+datasources.section.source-zendesk-support.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.title=Email +datasources.section.source-zendesk-talk.credentials.oneOf.0.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.title=OAuth2.0 +datasources.section.source-zendesk-talk.credentials.title=Authentication +datasources.section.source-zendesk-talk.start_date.title=Start Date +datasources.section.source-zendesk-talk.subdomain.title=Subdomain +datasources.section.source-zendesk-talk.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.start_date.description=The date from which you'd like to replicate data for Zendesk Talk API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-zendesk-talk.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zenloop.api_token.description=Zenloop API Token. You can get the API token in settings page here +datasources.section.source-zenloop.date_from.description=Zenloop date_from. Format: 2021-10-24T03:30:30Z or 2021-10-24. Leave empty if only data from current data should be synced +datasources.section.source-zenloop.survey_group_id.description=Zenloop Survey Group ID. Can be found by pulling All Survey Groups via SurveyGroups stream. Leave empty to pull answers from all survey groups +datasources.section.source-zenloop.survey_id.description=Zenloop Survey ID. Can be found here. Leave empty to pull answers from all surveys +datasources.section.source-zoho-crm.client_id.title=Client ID +datasources.section.source-zoho-crm.client_secret.title=Client Secret +datasources.section.source-zoho-crm.dc_region.title=Data Center Location +datasources.section.source-zoho-crm.edition.title=Zoho CRM Edition +datasources.section.source-zoho-crm.environment.title=Environment +datasources.section.source-zoho-crm.refresh_token.title=Refresh Token +datasources.section.source-zoho-crm.start_datetime.title=Start Date +datasources.section.source-zoho-crm.client_id.description=OAuth2.0 Client ID +datasources.section.source-zoho-crm.client_secret.description=OAuth2.0 Client Secret +datasources.section.source-zoho-crm.dc_region.description=Please choose the region of your Data Center location. 
More info by this Link +datasources.section.source-zoho-crm.edition.description=Choose your Edition of Zoho CRM to determine API Concurrency Limits +datasources.section.source-zoho-crm.environment.description=Please choose the environment +datasources.section.source-zoho-crm.refresh_token.description=OAuth2.0 Refresh Token +datasources.section.source-zoho-crm.start_datetime.description=ISO 8601, for instance: `YYYY-MM-DD`, `YYYY-MM-DD HH:MM:SS+HH:MM` +datasources.section.source-zoom-singer.jwt.title=JWT Token +datasources.section.source-zoom-singer.jwt.description=Zoom JWT Token. See the docs for more information on how to obtain this key. +datasources.section.source-zuora.client_id.title=Client ID +datasources.section.source-zuora.client_secret.title=Client Secret +datasources.section.source-zuora.data_query.title=Data Query Type +datasources.section.source-zuora.start_date.title=Start Date +datasources.section.source-zuora.tenant_endpoint.title=Tenant Endpoint Location +datasources.section.source-zuora.window_in_days.title=Query Window (in days) +datasources.section.source-zuora.client_id.description=Your OAuth user Client ID +datasources.section.source-zuora.client_secret.description=Your OAuth user Client Secret +datasources.section.source-zuora.data_query.description=Choose between `Live` or `Unlimited` - the optimized, replicated database at 12 hours freshness for high volume extraction Link +datasources.section.source-zuora.start_date.description=Start Date in format: YYYY-MM-DD +datasources.section.source-zuora.tenant_endpoint.description=Please choose the right endpoint where your Tenant is located. More info by this Link +datasources.section.source-zuora.window_in_days.description=The number of days in each data chunk, beginning from start_date. The bigger the value, the faster the fetch (0.1 is roughly a couple of hours; 1 is a day; 364 is a year). +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If it does not exist, it will be created automatically. May be left empty, in which case a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. Use this option to choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.
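+# A minimal, hypothetical destination-bigquery sketch using the GCS Staging loading method described
+# above (field names follow the titles in this file; every value is a placeholder):
+#   project_id: my-gcp-project   dataset_id: airbyte_raw   dataset_location: US
+#   loading_method: GCS Staging -> gcs_bucket_name: my-staging-bucket, gcs_bucket_path: airbyte/staging,
+#                   credential: HMAC key (hmac_key_access_id + hmac_key_secret)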
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. Use this option to choose whether those records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra.
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s). +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key ID that grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key ID. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key ID to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key ID.
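+# Illustrative note (assumption, not part of the generated Airbyte spec): the dynamodb_endpoint above is only
+# needed for DynamoDB-compatible stores; for AWS DynamoDB the field is left empty, while a local instance would
+# commonly use something like dynamodb_endpoint=http://localhost:8000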
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use a api key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to used when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full url of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the elasticsearch doc id. Does not support composite primary keys. 
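+# Illustrative example (assumption, not from the spec): the Elasticsearch endpoint is the full URL, including
+# scheme and port, e.g. endpoint=https://es.example.internal:9200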
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to log in to. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target Firestore database.
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is a waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for the dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs. +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token used to obtain a new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database.
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
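+# Illustrative example (assumption, not from the spec): a bootstrap_servers value following the
+# host1:port1,host2:port2 form described above, e.g.
+# bootstrap_servers=kafka-1.example.internal:9092,kafka-2.example.internal:9092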
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses, the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow the connector to guess the keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for the current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before they are batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique for each account, also known as a "recovery phrase".
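+# Illustrative example (assumption, not from the spec): a topic_pattern using the '{namespace}' and '{stream}'
+# placeholders described above, e.g. topic_pattern=airbyte.{namespace}.{stream}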
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member, separated by commas. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
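+# Illustrative example (assumption, not from the spec): server_addresses lists each replica set member as
+# host:port, comma-separated, e.g. server_addresses=mongo-1.example.internal:27017,mongo-2.example.internal:27017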
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
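+# Illustrative example (assumption, not from the spec): the Pulsar brokers value is a comma-separated list of
+# host:port pairs, e.g. brokers=pulsar-1.example.internal:6650,pulsar-2.example.internal:6650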
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer. +datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages. +datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions. +datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name. +datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar. +datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by the server before the send timeout (in ms) expires, an error occurs. +datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces. +datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and are spread across clusters. +datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk. +datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-rabbitmq.exchange.description=The exchange name. +datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name. +datasources.section.destination-rabbitmq.password.description=The password to connect. +datasources.section.destination-rabbitmq.port.description=The RabbitMQ port. +datasources.section.destination-rabbitmq.routing_key.description=The routing key. +datasources.section.destination-rabbitmq.ssl.description=SSL enabled. +datasources.section.destination-rabbitmq.username.description=The username to connect. +datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name. +datasources.section.destination-redis.cache_type.title=Cache type +datasources.section.destination-redis.host.title=Host +datasources.section.destination-redis.password.title=Password +datasources.section.destination-redis.port.title=Port +datasources.section.destination-redis.username.title=Username +datasources.section.destination-redis.cache_type.description=Redis cache type to store data in. +datasources.section.destination-redis.host.description=Redis host to connect to. +datasources.section.destination-redis.password.description=Password associated with Redis. +datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis. +datasources.section.destination-redshift.database.title=Database +datasources.section.destination-redshift.host.title=Host +datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-redshift.password.title=Password +datasources.section.destination-redshift.port.title=Port +datasources.section.destination-redshift.schema.title=Default Schema +datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key +datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging +datasources.section.destination-redshift.uploading_method.title=Uploading Method +datasources.section.destination-redshift.username.title=Username +datasources.section.destination-redshift.database.description=Name of the database. +datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id and region, and end with .redshift.amazonaws.com). +datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-redshift.password.description=Password associated with the username. +datasources.section.destination-redshift.port.description=Port of the database. +datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-redshift.uploading_method.description=The method used to upload the data to the database. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
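+# Illustrative example (assumption, not from the spec): a Redshift host endpoint matching the description above,
+# e.g. host=examplecluster.abc123xyz789.us-west-2.redshift.amazonaws.com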
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-rockset.api_key.title=Api Key +datasources.section.destination-rockset.api_server.title=Api Server +datasources.section.destination-rockset.workspace.title=Workspace +datasources.section.destination-rockset.api_key.description=Rockset api key +datasources.section.destination-rockset.api_server.description=Rockset api URL +datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created + written to. 
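Note on the key layout above: every entry is a flattened JSON-schema path of the form `datasources.section.<connector>.<field path>.title` or `.description`, with `oneOf.N` segments indexing the variants of a union field (for example `destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name`). A minimal Scala sketch of how such a bundle could be read, assuming it is loaded as a plain `java.util.Properties` resource; the resource name `/datasources.properties` and the `labelFor` helper are illustrative, not part of this change:

```
import java.util.Properties

object DatasourceLabels {
  // Illustrative resource name; the real bundle path may differ.
  private val props: Properties = {
    val p = new Properties()
    val in = getClass.getResourceAsStream("/datasources.properties")
    if (in != null) try p.load(in) finally in.close()
    p
  }

  /** Resolve the UI title and description for a connector field path, e.g.
    * labelFor("destination-redshift", "uploading_method.oneOf.1.properties.s3_bucket_name"). */
  def labelFor(connector: String, fieldPath: String): (Option[String], Option[String]) = {
    val base = s"datasources.section.$connector.$fieldPath"
    (Option(props.getProperty(s"$base.title")), Option(props.getProperty(s"$base.description")))
  }
}
```

Keys with empty values (see the Snowflake and Asana OAuth fields further down) resolve to empty strings rather than missing entries, so a consumer may want to treat blank values as absent.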
+datasources.section.destination-s3.access_key_id.title=S3 Key ID * +datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec * +datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage 
+datasources.section.destination-s3.format.title=Output Format * +datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional) +datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional) +datasources.section.destination-s3.secret_access_key.title=S3 Access Key * +datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here. +datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-s3.format.description=Format of the data output. See here for more details +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. 
This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here. +datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here +datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes. +datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here +datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here +datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here +datasources.section.destination-scylla.address.title=Address +datasources.section.destination-scylla.keyspace.title=Keyspace +datasources.section.destination-scylla.password.title=Password +datasources.section.destination-scylla.port.title=Port +datasources.section.destination-scylla.replication.title=Replication factor +datasources.section.destination-scylla.username.title=Username +datasources.section.destination-scylla.address.description=Address to connect to. +datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in. +datasources.section.destination-scylla.password.description=Password associated with Scylla. +datasources.section.destination-scylla.port.description=Port of Scylla. +datasources.section.destination-scylla.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-scylla.username.description=Username to use to access Scylla. +datasources.section.destination-sftp-json.destination_path.title=Destination path +datasources.section.destination-sftp-json.host.title=Host +datasources.section.destination-sftp-json.password.title=Password +datasources.section.destination-sftp-json.port.title=Port +datasources.section.destination-sftp-json.username.title=User +datasources.section.destination-sftp-json.destination_path.description=Path to the directory where json files will be written. +datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server. +datasources.section.destination-sftp-json.password.description=Password associated with the username. +datasources.section.destination-sftp-json.port.description=Port of the SFTP server. +datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server. 
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional) +datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password +datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password +datasources.section.destination-snowflake.credentials.title=Authorization Method +datasources.section.destination-snowflake.database.title=Database +datasources.section.destination-snowflake.host.title=Host +datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging +datasources.section.destination-snowflake.loading_method.title=Data Staging Method +datasources.section.destination-snowflake.role.title=Role +datasources.section.destination-snowflake.schema.title=Default Schema +datasources.section.destination-snowflake.username.title=Username +datasources.section.destination-snowflake.warehouse.title=Warehouse +datasources.section.destination-snowflake.credentials.description= +datasources.section.destination-snowflake.credentials.description= +datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter you application's Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key. +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username. +datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into +datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format ...snowflakecomputing.com) +datasources.section.destination-snowflake.jdbc_url_params.description=Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &). 
Example: key1=value1&key2=value2&key3=value3 +datasources.section.destination-snowflake.loading_method.description=Select a data staging method +datasources.section.destination-snowflake.loading_method.oneOf.0.description=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.description=Enter your AWS access key ID. Airbyte requires Read and Write permissions on your S3 bucket +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.description=Choose a data encryption method for the staging data +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.description=Toggle to delete staging files from the S3 bucket after a successful sync +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.description=Enter your S3 bucket name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.description=Enter the region where your S3 bucket resides +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.description=Enter your AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.3.description=Recommended for large production workloads for better speed and scalability. 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.description=Enter the Cloud Storage bucket name +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.description=Enter your Google Cloud service account key in the JSON format with read/write access to your Cloud Storage staging bucket +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.description=Enter the Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.4.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.description=Enter your Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.description=Enter your Azure Blob Storage container name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.description=Enter the Azure Blob Storage endpoint domain name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.description=Enter the Shared access signature (SAS) token to grant Snowflake limited access to objects in your Azure Blob Storage account +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.role.description=Enter the role that you want to use to access Snowflake +datasources.section.destination-snowflake.schema.description=Enter the name of the default schema +datasources.section.destination-snowflake.username.description=Enter the name of the user you want to use to access the database +datasources.section.destination-snowflake.warehouse.description=Enter the name of the warehouse that you want to sync data into +datasources.section.destination-sqlite.destination_path.description=Path to the sqlite.db file. The file will be placed inside that local mount. For more information check out our docs +datasources.section.destination-tidb.database.title=Database +datasources.section.destination-tidb.host.title=Host +datasources.section.destination-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-tidb.password.title=Password +datasources.section.destination-tidb.port.title=Port +datasources.section.destination-tidb.ssl.title=SSL Connection +datasources.section.destination-tidb.username.title=User +datasources.section.destination-tidb.database.description=Name of the database. +datasources.section.destination-tidb.host.description=Hostname of the database. +datasources.section.destination-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-tidb.password.description=Password associated with the username. +datasources.section.destination-tidb.port.description=Port of the database. 
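Several of the connectors above (Redshift, Snowflake, Db2, TiDB) describe `jdbc_url_params` as 'key=value' pairs joined with '&', e.g. key1=value1&key2=value2&key3=value3. A minimal sketch of how such a string could be appended to a base JDBC URL; the helper name and example URL are illustrative only, not the connectors' actual code:

```
/** Append user-supplied "key1=value1&key2=value2" style parameters to a base
  * JDBC URL, using '?' for the first parameter and '&' thereafter. */
def withJdbcUrlParams(baseUrl: String, jdbcUrlParams: String): String =
  Option(jdbcUrlParams).map(_.trim).filter(_.nonEmpty) match {
    case None => baseUrl
    case Some(params) =>
      val sep = if (baseUrl.contains("?")) "&" else "?"
      s"$baseUrl$sep$params"
  }

// withJdbcUrlParams("jdbc:mysql://tidb.example.com:4000/mydb", "connectTimeout=5000&useSSL=true")
// => "jdbc:mysql://tidb.example.com:4000/mydb?connectTimeout=5000&useSSL=true"
```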
+datasources.section.destination-tidb.ssl.description=Encrypt data using SSL. +datasources.section.destination-tidb.username.description=Username to use to access the database. +datasources.section.source-airtable.api_key.title=API Key +datasources.section.source-airtable.base_id.title=Base ID +datasources.section.source-airtable.tables.title=Tables +datasources.section.source-airtable.api_key.description=The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key. +datasources.section.source-airtable.base_id.description=The Base ID to integrate the data from. You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs. +datasources.section.source-airtable.tables.description=The list of Tables to integrate. +datasources.section.source-amazon-ads.auth_type.title=Auth Type +datasources.section.source-amazon-ads.client_id.title=Client ID +datasources.section.source-amazon-ads.client_secret.title=Client Secret +datasources.section.source-amazon-ads.profiles.title=Profile IDs (Optional) +datasources.section.source-amazon-ads.refresh_token.title=Refresh Token +datasources.section.source-amazon-ads.region.title=Region * +datasources.section.source-amazon-ads.report_generation_max_retries.title=Report Generation Maximum Retries * +datasources.section.source-amazon-ads.report_wait_timeout.title=Report Wait Timeout * +datasources.section.source-amazon-ads.start_date.title=Start Date (Optional) +datasources.section.source-amazon-ads.client_id.description=The client ID of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.client_secret.description=The client secret of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.profiles.description=Profile IDs you want to fetch data for. See docs for more details. +datasources.section.source-amazon-ads.refresh_token.description=Amazon Ads refresh token. See the docs for more information on how to obtain this token. +datasources.section.source-amazon-ads.region.description=Region to pull data from (EU/NA/FE). See docs for more details. +datasources.section.source-amazon-ads.report_generation_max_retries.description=Maximum retries Airbyte will attempt for fetching report data. Default is 5. +datasources.section.source-amazon-ads.report_wait_timeout.description=Timeout duration in minutes for Reports. Default is 30 minutes. +datasources.section.source-amazon-ads.start_date.description=The Start date for collecting reports, should not be more than 60 days in the past. 
In YYYY-MM-DD format. +datasources.section.source-amazon-seller-partner.app_id.title=App ID * +datasources.section.source-amazon-seller-partner.auth_type.title=Auth Type +datasources.section.source-amazon-seller-partner.aws_access_key.title=AWS Access Key +datasources.section.source-amazon-seller-partner.aws_environment.title=AWS Environment +datasources.section.source-amazon-seller-partner.aws_secret_key.title=AWS Secret Access Key +datasources.section.source-amazon-seller-partner.lwa_app_id.title=LWA Client ID +datasources.section.source-amazon-seller-partner.lwa_client_secret.title=LWA Client Secret +datasources.section.source-amazon-seller-partner.max_wait_seconds.title=Max wait time for reports (in seconds) +datasources.section.source-amazon-seller-partner.period_in_days.title=Period In Days +datasources.section.source-amazon-seller-partner.refresh_token.title=Refresh Token +datasources.section.source-amazon-seller-partner.region.title=AWS Region +datasources.section.source-amazon-seller-partner.replication_end_date.title=End Date +datasources.section.source-amazon-seller-partner.replication_start_date.title=Start Date +datasources.section.source-amazon-seller-partner.report_options.title=Report Options +datasources.section.source-amazon-seller-partner.role_arn.title=Role ARN +datasources.section.source-amazon-seller-partner.app_id.description=Your Amazon App ID +datasources.section.source-amazon-seller-partner.aws_access_key.description=Specifies the AWS access key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.aws_environment.description=An enumeration. +datasources.section.source-amazon-seller-partner.aws_secret_key.description=Specifies the AWS secret key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.lwa_app_id.description=Your Login with Amazon Client ID. +datasources.section.source-amazon-seller-partner.lwa_client_secret.description=Your Login with Amazon Client Secret. +datasources.section.source-amazon-seller-partner.max_wait_seconds.description=Sometimes a report can take up to 30 minutes to generate. This sets the limit for how long to wait for a successful report. +datasources.section.source-amazon-seller-partner.period_in_days.description=Used for stream slicing during the initial full_refresh sync when no updated state is present, for reports that support sliced incremental sync. +datasources.section.source-amazon-seller-partner.refresh_token.description=The Refresh Token obtained via OAuth flow authorization. +datasources.section.source-amazon-seller-partner.region.description=An enumeration. +datasources.section.source-amazon-seller-partner.replication_end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. +datasources.section.source-amazon-seller-partner.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-amazon-seller-partner.report_options.description=Additional information passed to reports. This varies by report type. Must be a valid JSON string. +datasources.section.source-amazon-seller-partner.role_arn.description=Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations requested using this profile. (Needs permission to 'Assume Role' STS).
+datasources.section.source-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.source-amazon-sqs.attributes_to_return.title=Message Attributes To Return +datasources.section.source-amazon-sqs.delete_messages.title=Delete Messages After Read +datasources.section.source-amazon-sqs.max_batch_size.title=Max Batch Size +datasources.section.source-amazon-sqs.max_wait_time.title=Max Wait Time +datasources.section.source-amazon-sqs.queue_url.title=Queue URL +datasources.section.source-amazon-sqs.region.title=AWS Region +datasources.section.source-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.source-amazon-sqs.visibility_timeout.title=Message Visibility Timeout +datasources.section.source-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.attributes_to_return.description=Comma-separated list of Message Attribute names to return +datasources.section.source-amazon-sqs.delete_messages.description=If Enabled, messages will be deleted from the SQS Queue after being read. If Disabled, messages are left in the queue and can be read more than once. WARNING: Enabling this option can result in data loss in cases of failure; use with caution and see the documentation for more detail. +datasources.section.source-amazon-sqs.max_batch_size.description=Maximum number of messages to get in one batch (10 max) +datasources.section.source-amazon-sqs.max_wait_time.description=Maximum amount of time in seconds to wait for messages in a single poll (20 max) +datasources.section.source-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.source-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.source-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.visibility_timeout.description=Modify the Visibility Timeout of the individual message from the Queue's default (seconds). +datasources.section.source-amplitude.api_key.title=API Key +datasources.section.source-amplitude.secret_key.title=Secret Key +datasources.section.source-amplitude.start_date.title=Replication Start Date +datasources.section.source-amplitude.api_key.description=Amplitude API Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.secret_key.description=Amplitude Secret Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-apify-dataset.clean.title=Clean +datasources.section.source-apify-dataset.datasetId.title=Dataset ID +datasources.section.source-apify-dataset.clean.description=If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false. +datasources.section.source-apify-dataset.datasetId.description=ID of the dataset you would like to load to Airbyte. +datasources.section.source-appsflyer.api_token.description=Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard. +datasources.section.source-appsflyer.app_id.description=App identifier as found in AppsFlyer.
+datasources.section.source-appsflyer.start_date.description=The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days. +datasources.section.source-appsflyer.timezone.description=Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console. +datasources.section.source-appstore-singer.issuer_id.title=Issuer ID +datasources.section.source-appstore-singer.key_id.title=Key ID +datasources.section.source-appstore-singer.private_key.title=Private Key +datasources.section.source-appstore-singer.start_date.title=Start Date +datasources.section.source-appstore-singer.vendor.title=Vendor ID +datasources.section.source-appstore-singer.issuer_id.description=Appstore Issuer ID. See the docs for more information on how to obtain this ID. +datasources.section.source-appstore-singer.key_id.description=Appstore Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.private_key.description=Appstore Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-appstore-singer.vendor.description=Appstore Vendor ID. See the docs for more information on how to obtain this ID. +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.title=Personal Access Token +datasources.section.source-asana.credentials.oneOf.0.title=Authenticate with Personal Access Token +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title= +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title= +datasources.section.source-asana.credentials.oneOf.1.title=Authenticate via Asana (OAuth) +datasources.section.source-asana.credentials.title=Authentication mechanism +datasources.section.source-asana.credentials.description=Choose how to authenticate to Asana +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.description=PAT Credentials +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.description=Asana Personal Access Token (generate yours here).
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.description=OAuth Credentials +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description= +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description= +datasources.section.source-aws-cloudtrail.aws_key_id.title=Key ID +datasources.section.source-aws-cloudtrail.aws_region_name.title=Region Name +datasources.section.source-aws-cloudtrail.aws_secret_key.title=Secret Key +datasources.section.source-aws-cloudtrail.start_date.title=Start Date +datasources.section.source-aws-cloudtrail.aws_key_id.description=AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.aws_region_name.description=The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name. +datasources.section.source-aws-cloudtrail.aws_secret_key.description=AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.start_date.description=The date you would like to replicate data. Data in AWS CloudTrail is available for last 90 days only. Format: YYYY-MM-DD. +datasources.section.source-azure-table.storage_access_key.title=Access Key +datasources.section.source-azure-table.storage_account_name.title=Account Name +datasources.section.source-azure-table.storage_endpoint_suffix.title=Endpoint Suffix +datasources.section.source-azure-table.storage_access_key.description=Azure Table Storage Access Key. See the docs for more information on how to obtain this key. +datasources.section.source-azure-table.storage_account_name.description=The name of your storage account. +datasources.section.source-azure-table.storage_endpoint_suffix.description=Azure Table Storage service account URL suffix. See the docs for more information on how to obtain endpoint suffix +datasources.section.source-bamboo-hr.api_key.description=Api key of bamboo hr +datasources.section.source-bamboo-hr.custom_reports_fields.description=Comma-separated list of fields to include in custom reports. +datasources.section.source-bamboo-hr.custom_reports_include_default_fields.description=If true, the custom reports endpoint will include the default fields defined here: https://documentation.bamboohr.com/docs/list-of-field-names. +datasources.section.source-bamboo-hr.subdomain.description=Sub Domain of bamboo hr +datasources.section.source-bigcommerce.access_token.title=Access Token +datasources.section.source-bigcommerce.start_date.title=Start Date +datasources.section.source-bigcommerce.store_hash.title=Store Hash +datasources.section.source-bigcommerce.access_token.description=Access Token for making authenticated requests. +datasources.section.source-bigcommerce.start_date.description=The date you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-bigcommerce.store_hash.description=The hash code of the store. For https://api.bigcommerce.com/stores/HASH_CODE/v3/, The store's hash code is 'HASH_CODE'. 
+datasources.section.source-bigquery.credentials_json.title=Credentials JSON +datasources.section.source-bigquery.dataset_id.title=Default Dataset ID +datasources.section.source-bigquery.project_id.title=Project ID +datasources.section.source-bigquery.credentials_json.description=The contents of your Service Account Key JSON file. See the docs for more information on how to obtain this key. +datasources.section.source-bigquery.dataset_id.description=The dataset ID to search for tables and views. If you are only loading data from one dataset, setting this option could result in much faster schema discovery. +datasources.section.source-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. +datasources.section.source-bing-ads.client_id.title=Client ID +datasources.section.source-bing-ads.client_secret.title=Client Secret +datasources.section.source-bing-ads.developer_token.title=Developer Token +datasources.section.source-bing-ads.refresh_token.title=Refresh Token +datasources.section.source-bing-ads.reports_start_date.title=Reports replication start date +datasources.section.source-bing-ads.tenant_id.title=Tenant ID +datasources.section.source-bing-ads.client_id.description=The Client ID of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.client_secret.description=The Client Secret of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.developer_token.description=Developer token associated with user. See more info in the docs. +datasources.section.source-bing-ads.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-bing-ads.reports_start_date.description=The start date from which to begin replicating report data. Any data generated before this date will not be replicated in reports. This is a UTC date in YYYY-MM-DD format. +datasources.section.source-bing-ads.tenant_id.description=The Tenant ID of your Microsoft Advertising developer application. Set this to "common" unless you know you need a different value. +datasources.section.source-braintree.environment.title=Environment +datasources.section.source-braintree.merchant_id.title=Merchant ID +datasources.section.source-braintree.private_key.title=Private Key +datasources.section.source-braintree.public_key.title=Public Key +datasources.section.source-braintree.start_date.title=Start Date +datasources.section.source-braintree.environment.description=Environment specifies where the data will come from. +datasources.section.source-braintree.merchant_id.description=The unique identifier for your entire gateway account. See the docs for more information on how to obtain this ID. +datasources.section.source-braintree.private_key.description=Braintree Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.public_key.description=Braintree Public Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-cart.access_token.title=Access Token +datasources.section.source-cart.start_date.title=Start Date +datasources.section.source-cart.store_name.title=Store Name +datasources.section.source-cart.access_token.description=Access Token for making authenticated requests. 
+datasources.section.source-cart.start_date.description=The date from which you'd like to replicate the data +datasources.section.source-cart.store_name.description=The name of Cart.com Online Store. All API URLs start with https://[mystorename.com]/api/v1/, where [mystorename.com] is the domain name of your store. +datasources.section.source-chargebee.product_catalog.title=Product Catalog +datasources.section.source-chargebee.site.title=Site +datasources.section.source-chargebee.site_api_key.title=API Key +datasources.section.source-chargebee.start_date.title=Start Date +datasources.section.source-chargebee.product_catalog.description=Product Catalog version of your Chargebee site. Instructions on how to find your version you may find here under `API Version` section. +datasources.section.source-chargebee.site.description=The site prefix for your Chargebee instance. +datasources.section.source-chargebee.site_api_key.description=Chargebee API Key. See the docs for more information on how to obtain this key. +datasources.section.source-chargebee.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-chargify.api_key.description=Chargify API Key. +datasources.section.source-chargify.domain.description=Chargify domain. Normally this domain follows the following format companyname.chargify.com +datasources.section.source-chartmogul.api_key.description=Chartmogul API key +datasources.section.source-chartmogul.interval.description=Some APIs such as Metrics require intervals to cluster data. +datasources.section.source-chartmogul.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. When feasible, any data before this date will not be replicated. +datasources.section.source-clickhouse.database.title=Database +datasources.section.source-clickhouse.host.title=Host +datasources.section.source-clickhouse.password.title=Password +datasources.section.source-clickhouse.port.title=Port +datasources.section.source-clickhouse.ssl.title=SSL Connection +datasources.section.source-clickhouse.username.title=Username +datasources.section.source-clickhouse.database.description=The name of the database. +datasources.section.source-clickhouse.host.description=The host endpoint of the Clickhouse cluster. +datasources.section.source-clickhouse.password.description=The password associated with this username. +datasources.section.source-clickhouse.port.description=The port of the database. +datasources.section.source-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.source-clickhouse.username.description=The username which is used to access the database. +datasources.section.source-close-com.api_key.description=Close.com API key (usually starts with 'api_'; find yours here). +datasources.section.source-close-com.start_date.description=The start date to sync data. Leave blank for full sync. Format: YYYY-MM-DD. +datasources.section.source-cockroachdb.database.title=DB Name +datasources.section.source-cockroachdb.host.title=Host +datasources.section.source-cockroachdb.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-cockroachdb.password.title=Password +datasources.section.source-cockroachdb.port.title=Port +datasources.section.source-cockroachdb.ssl.title=Connect using SSL +datasources.section.source-cockroachdb.username.title=User +datasources.section.source-cockroachdb.database.description=Name of the database. 
+datasources.section.source-cockroachdb.host.description=Hostname of the database. +datasources.section.source-cockroachdb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (e.g. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-cockroachdb.password.description=Password associated with the username. +datasources.section.source-cockroachdb.port.description=Port of the database. +datasources.section.source-cockroachdb.ssl.description=Encrypt client/server communications for increased security. +datasources.section.source-cockroachdb.username.description=Username to use to access the database. +datasources.section.source-commercetools.client_id.description=ID of the API Client. +datasources.section.source-commercetools.client_secret.description=The password or secret of the API Client. +datasources.section.source-commercetools.host.description=The cloud provider where your shop is hosted. See: https://docs.commercetools.com/api/authorization +datasources.section.source-commercetools.project_key.description=The project key. +datasources.section.source-commercetools.region.description=The region of the platform. +datasources.section.source-commercetools.start_date.description=The date from which you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-confluence.api_token.description=Please follow the Atlassian documentation for generating an API token: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/ +datasources.section.source-confluence.domain_name.description=Your Confluence domain name +datasources.section.source-confluence.email.description=Your Confluence login email +datasources.section.source-db2.encryption.oneOf.0.title=Unencrypted +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.title=Key Store Password. This field is optional. If you do not fill in this field, the password will be randomly generated. +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.title=SSL PEM file +datasources.section.source-db2.encryption.oneOf.1.title=TLS Encrypted (verify certificate) +datasources.section.source-db2.encryption.title=Encryption +datasources.section.source-db2.jdbc_url_params.title=JDBC URL Params +datasources.section.source-db2.db.description=Name of the database. +datasources.section.source-db2.encryption.description=Encryption method to use when communicating with the database +datasources.section.source-db2.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-db2.encryption.oneOf.1.description=Verify and use the cert provided by the server. +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.description=Key Store Password +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations +datasources.section.source-db2.host.description=Host of the Db2 database. +datasources.section.source-db2.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-db2.password.description=Password associated with the username.
+datasources.section.source-db2.port.description=Port of the database. +datasources.section.source-db2.username.description=Username to use to access the database. +datasources.section.source-delighted.api_key.title=Delighted API Key +datasources.section.source-delighted.since.title=Since +datasources.section.source-delighted.api_key.description=A Delighted API key. +datasources.section.source-delighted.since.description=The date from which you'd like to replicate the data +datasources.section.source-dixa.api_token.description=Dixa API token +datasources.section.source-dixa.batch_size.description=Number of days to batch into one request. Max 31. +datasources.section.source-dixa.start_date.description=The connector pulls records updated from this date onwards. +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-drift.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.1.title=Access Token +datasources.section.source-drift.credentials.title=Authorization Method +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.description=The Client ID of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.description=Drift Access Token. See the docs for more information on how to generate this key. 
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.source-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.source-elasticsearch.endpoint.title=Server Endpoint +datasources.section.source-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID used when accessing an enterprise Elasticsearch instance. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.source-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.source-facebook-marketing.access_token.title=Access Token +datasources.section.source-facebook-marketing.account_id.title=Account ID +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.title=ValidActionBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.title=Action Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.title=ValidBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.title=Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.title=End Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.title=ValidEnums +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.title=Fields +datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.title=Custom Insights Lookback Window +datasources.section.source-facebook-marketing.custom_insights.items.properties.name.title=Name +datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.title=Start Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.title=Time Increment
+datasources.section.source-facebook-marketing.custom_insights.items.title=InsightConfig +datasources.section.source-facebook-marketing.custom_insights.title=Custom Insights +datasources.section.source-facebook-marketing.end_date.title=End Date +datasources.section.source-facebook-marketing.fetch_thumbnail_images.title=Fetch Thumbnail Images +datasources.section.source-facebook-marketing.include_deleted.title=Include Deleted +datasources.section.source-facebook-marketing.insights_lookback_window.title=Insights Lookback Window +datasources.section.source-facebook-marketing.max_batch_size.title=Maximum size of Batched Requests +datasources.section.source-facebook-marketing.page_size.title=Page Size of Requests +datasources.section.source-facebook-marketing.start_date.title=Start Date +datasources.section.source-facebook-marketing.access_token.description=The value of the access token generated. See the docs for more information +datasources.section.source-facebook-marketing.account_id.description=The Facebook Ad account ID to use when pulling data from the Facebook Marketing API. +datasources.section.source-facebook-marketing.custom_insights.description=A list which contains insights entries; each entry must have a name and can contain fields, breakdowns or action_breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.description=Config for custom insights +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.description=A list of chosen action_breakdowns for action_breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.description=A list of chosen breakdowns for breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.description=The date until which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. All data generated between the start date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.description=A list of chosen fields for fields parameter +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.custom_insights.items.properties.name.description=The name value of the insight +datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.description=The date from which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.description=Time window in days by which to aggregate statistics. The sync will be chunked into N day intervals, where N is the number of days you specified. For example, if you set this value to 7, then all statistics will be reported as 7-day aggregates by starting from the start_date. 
If the start and end dates are October 1st and October 30th, then the connector will output 5 records: 01 - 06, 07 - 13, 14 - 20, 21 - 27, and 28 - 30 (3 days only). +datasources.section.source-facebook-marketing.end_date.description=The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.fetch_thumbnail_images.description=In each Ad Creative, fetch the thumbnail_url and store the result in thumbnail_data_url +datasources.section.source-facebook-marketing.include_deleted.description=Include data from deleted Campaigns, Ads, and AdSets +datasources.section.source-facebook-marketing.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.max_batch_size.description=Maximum batch size used when sending batch requests to Facebook API. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.page_size.description=Page size used when sending requests to Facebook API to specify number of records per page when response has pagination. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.start_date.description=The date from which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-facebook-pages.access_token.title=Page Access Token +datasources.section.source-facebook-pages.page_id.title=Page ID +datasources.section.source-facebook-pages.access_token.description=Facebook Page Access Token +datasources.section.source-facebook-pages.page_id.description=Page ID +datasources.section.source-faker.count.title=Count +datasources.section.source-faker.records_per_slice.title=Records Per Stream Slice +datasources.section.source-faker.records_per_sync.title=Records Per Sync +datasources.section.source-faker.seed.title=Seed +datasources.section.source-faker.count.description=How many users should be generated in total. This setting does not apply to the purchases or products stream. +datasources.section.source-faker.records_per_slice.description=How many fake records will be in each page (stream slice), before a state message is emitted? +datasources.section.source-faker.records_per_sync.description=How many fake records will be returned for each sync, for each stream? By default, it will take 2 syncs to create the requested 1000 records. 
+datasources.section.source-faker.seed.description=Manually control the faker random seed to return the same values on subsequent runs (leave -1 for random) +datasources.section.source-file.dataset_name.title=Dataset Name +datasources.section.source-file.format.title=File Format +datasources.section.source-file.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.4.properties.user.title=User +datasources.section.source-file.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.5.properties.user.title=User +datasources.section.source-file.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.6.properties.user.title=User +datasources.section.source-file.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file.provider.oneOf.7.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.7.title=Local Filesystem (limited) +datasources.section.source-file.provider.title=Storage Provider +datasources.section.source-file.reader_options.title=Reader Options +datasources.section.source-file.url.title=URL +datasources.section.source-file.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details. 
+datasources.section.source-file.provider.oneOf.4.properties.host.description= +datasources.section.source-file.provider.oneOf.4.properties.password.description= +datasources.section.source-file.provider.oneOf.4.properties.port.description= +datasources.section.source-file.provider.oneOf.4.properties.user.description= +datasources.section.source-file.provider.oneOf.5.properties.host.description= +datasources.section.source-file.provider.oneOf.5.properties.password.description= +datasources.section.source-file.provider.oneOf.5.properties.port.description= +datasources.section.source-file.provider.oneOf.5.properties.user.description= +datasources.section.source-file.provider.oneOf.6.properties.host.description= +datasources.section.source-file.provider.oneOf.6.properties.password.description= +datasources.section.source-file.provider.oneOf.6.properties.port.description= +datasources.section.source-file.provider.oneOf.6.properties.user.description= +datasources.section.source-file.provider.oneOf.7.properties.storage.description=WARNING: Note that the local storage URL available for reading must start with the local mount "/local/" at the moment until we implement more advanced docker mounting options. +datasources.section.source-file.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior. +datasources.section.source-file.url.description=The URL path to access the file which should be replicated. 
+datasources.section.source-file-secure.dataset_name.title=Dataset Name +datasources.section.source-file-secure.format.title=File Format +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file-secure.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file-secure.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file-secure.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file-secure.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file-secure.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file-secure.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.5.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file-secure.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.6.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file-secure.provider.title=Storage Provider +datasources.section.source-file-secure.reader_options.title=Reader Options +datasources.section.source-file-secure.url.title=URL +datasources.section.source-file-secure.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file-secure.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file-secure.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details. 
+datasources.section.source-file-secure.provider.oneOf.4.properties.host.description= +datasources.section.source-file-secure.provider.oneOf.4.properties.password.description= +datasources.section.source-file-secure.provider.oneOf.4.properties.port.description= +datasources.section.source-file-secure.provider.oneOf.4.properties.user.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.host.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.password.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.port.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.user.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.host.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.password.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.port.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.user.description= +datasources.section.source-file-secure.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior. +datasources.section.source-file-secure.url.description=The URL path to access the file which should be replicated. +datasources.section.source-firebolt.account.title=Account +datasources.section.source-firebolt.database.title=Database +datasources.section.source-firebolt.engine.title=Engine +datasources.section.source-firebolt.host.title=Host +datasources.section.source-firebolt.password.title=Password +datasources.section.source-firebolt.username.title=Username +datasources.section.source-firebolt.account.description=Firebolt account to log in to. +datasources.section.source-firebolt.database.description=The database to connect to. +datasources.section.source-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.source-firebolt.host.description=The host name of your Firebolt database. +datasources.section.source-firebolt.password.description=Firebolt password. +datasources.section.source-firebolt.username.description=Firebolt email address you use to log in. 
+datasources.section.source-flexport.api_key.title=API Key +datasources.section.source-flexport.start_date.title=Start Date +datasources.section.source-freshcaller.api_key.title=API Key +datasources.section.source-freshcaller.domain.title=Domain for Freshcaller account +datasources.section.source-freshcaller.requests_per_minute.title=Requests per minute +datasources.section.source-freshcaller.start_date.title=Start Date +datasources.section.source-freshcaller.sync_lag_minutes.title=Lag in minutes for each sync +datasources.section.source-freshcaller.api_key.description=Freshcaller API Key. See the docs for more information on how to obtain this key. +datasources.section.source-freshcaller.domain.description=Used to construct Base URL for the Freshcaller APIs +datasources.section.source-freshcaller.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account. +datasources.section.source-freshcaller.start_date.description=UTC date and time. Any data created after this date will be replicated. +datasources.section.source-freshcaller.sync_lag_minutes.description=Lag in minutes for each sync, i.e., at time T, data for the time range [prev_sync_time, T-30] will be fetched +datasources.section.source-freshdesk.api_key.title=API Key +datasources.section.source-freshdesk.domain.title=Domain +datasources.section.source-freshdesk.requests_per_minute.title=Requests per minute +datasources.section.source-freshdesk.start_date.title=Start Date +datasources.section.source-freshdesk.api_key.description=Freshdesk API Key. See the docs for more information on how to obtain this key. +datasources.section.source-freshdesk.domain.description=Freshdesk domain +datasources.section.source-freshdesk.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account. +datasources.section.source-freshdesk.start_date.description=UTC date and time. Any data created after this date will be replicated. If this parameter is not set, all data will be replicated. +datasources.section.source-freshsales.api_key.title=API Key +datasources.section.source-freshsales.domain_name.title=Domain Name +datasources.section.source-freshsales.api_key.description=Freshsales API Key. See here. The key is case sensitive. +datasources.section.source-freshsales.domain_name.description=The Name of your Freshsales domain +datasources.section.source-freshservice.api_key.title=API Key +datasources.section.source-freshservice.domain_name.title=Domain Name +datasources.section.source-freshservice.start_date.title=Start Date +datasources.section.source-freshservice.api_key.description=Freshservice API Key. See here. The key is case sensitive. +datasources.section.source-freshservice.domain_name.description=The name of your Freshservice domain +datasources.section.source-freshservice.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-github.branch.title=Branch (Optional) +datasources.section.source-github.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-github.credentials.oneOf.0.title=OAuth +datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.title=Personal Access Tokens +datasources.section.source-github.credentials.oneOf.1.title=Personal Access Token +datasources.section.source-github.credentials.title=Authentication * +datasources.section.source-github.page_size_for_large_streams.title=Page size for large streams (Optional) +datasources.section.source-github.repository.title=GitHub Repositories +datasources.section.source-github.start_date.title=Start date +datasources.section.source-github.branch.description=Space-delimited list of GitHub repository branches to pull commits for, e.g. `airbytehq/airbyte/master`. If no branches are specified for a repository, the default branch will be pulled. +datasources.section.source-github.credentials.description=Choose how to authenticate to GitHub +datasources.section.source-github.credentials.oneOf.0.properties.access_token.description=OAuth access token +datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.description=Log into GitHub and then generate a personal access token. To load balance your API quota consumption across multiple API tokens, input multiple tokens separated with "," +datasources.section.source-github.page_size_for_large_streams.description=The GitHub connector contains several streams with a large amount of data. The page size of such streams depends on the size of your repository. We recommend that you specify values between 10 and 30. +datasources.section.source-github.repository.description=Space-delimited list of GitHub organizations/repositories, e.g. `airbytehq/airbyte` for a single repository, `airbytehq/*` to get all repositories from an organization, and `airbytehq/airbyte airbytehq/another-repo` for multiple repositories. +datasources.section.source-github.start_date.description=The date from which you'd like to replicate data from GitHub in the format YYYY-MM-DDT00:00:00Z. For the streams which support this configuration, only data generated on or after the start date will be replicated. This field doesn't apply to all streams, see the docs for more info +datasources.section.source-gitlab.api_url.title=API URL +datasources.section.source-gitlab.groups.title=Groups +datasources.section.source-gitlab.private_token.title=Private Token +datasources.section.source-gitlab.projects.title=Projects +datasources.section.source-gitlab.start_date.title=Start Date +datasources.section.source-gitlab.api_url.description=Please enter the base URL of your GitLab instance. +datasources.section.source-gitlab.groups.description=Space-delimited list of groups. e.g. airbyte.io. +datasources.section.source-gitlab.private_token.description=Log into your GitLab account and then generate a personal Access Token. +datasources.section.source-gitlab.projects.description=Space-delimited list of projects. e.g. airbyte.io/documentation meltano/tap-gitlab. +datasources.section.source-gitlab.start_date.description=The date from which you'd like to replicate data for GitLab API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. 
+datasources.section.source-google-ads.conversion_window_days.title=Conversion Window (Optional) +datasources.section.source-google-ads.credentials.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-ads.credentials.properties.client_id.title=Client ID +datasources.section.source-google-ads.credentials.properties.client_secret.title=Client Secret +datasources.section.source-google-ads.credentials.properties.developer_token.title=Developer Token +datasources.section.source-google-ads.credentials.properties.refresh_token.title=Refresh Token +datasources.section.source-google-ads.credentials.title=Google Credentials +datasources.section.source-google-ads.custom_queries.items.properties.query.title=Custom Query +datasources.section.source-google-ads.custom_queries.items.properties.table_name.title=Destination Table Name +datasources.section.source-google-ads.custom_queries.title=Custom GAQL Queries (Optional) +datasources.section.source-google-ads.customer_id.title=Customer ID(s) +datasources.section.source-google-ads.end_date.title=End Date (Optional) +datasources.section.source-google-ads.login_customer_id.title=Login Customer ID for Managed Accounts (Optional) +datasources.section.source-google-ads.start_date.title=Start Date +datasources.section.source-google-ads.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. For more information, see Google's documentation. +datasources.section.source-google-ads.credentials.description= +datasources.section.source-google-ads.credentials.properties.access_token.description=Access Token for making authenticated requests. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.client_id.description=The Client ID of your Google Ads developer application. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.client_secret.description=The Client Secret of your Google Ads developer application. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.developer_token.description=Developer token granted by Google to use their APIs. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.refresh_token.description=The token for obtaining a new access token. More instruction on how to find this value in our docs +datasources.section.source-google-ads.custom_queries.description= +datasources.section.source-google-ads.custom_queries.items.properties.query.description=A custom defined GAQL query for building the report. Should not contain segments.date expression because it is used by incremental streams. See Google's query builder for more information. +datasources.section.source-google-ads.custom_queries.items.properties.table_name.description=The table name in your destination database for the chosen query. +datasources.section.source-google-ads.customer_id.description=Comma separated list of (client) customer IDs. Each customer ID must be specified as a 10-digit number without dashes. More instruction on how to find this value in our docs. 
Metrics streams like AdGroupAdReport cannot be requested for a manager account. +datasources.section.source-google-ads.end_date.description=UTC date and time in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-google-ads.login_customer_id.description=If your access to the customer account is through a manager account, this field is required and must be set to the customer ID of the manager account (10-digit number without dashes). You can find more information about this field here +datasources.section.source-google-ads.start_date.description=UTC date and time in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.title=Authenticate via Google (Oauth) +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-analytics-data-api.credentials.title=Credentials +datasources.section.source-google-analytics-data-api.custom_reports.title=Custom Reports (Optional) +datasources.section.source-google-analytics-data-api.date_ranges_start_date.title=Date Range Start Date +datasources.section.source-google-analytics-data-api.property_id.title=Property ID +datasources.section.source-google-analytics-data-api.window_in_days.title=Data request time increment in days (Optional) +datasources.section.source-google-analytics-data-api.credentials.description=Credentials for the service +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization +datasources.section.source-google-analytics-data-api.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field. +datasources.section.source-google-analytics-data-api.date_ranges_start_date.description=The start date. One of the values Ndaysago, yesterday, today or in the format YYYY-MM-DD +datasources.section.source-google-analytics-data-api.property_id.description=A Google Analytics GA4 property identifier whose events are tracked. 
Specified in the URL path and not the body +datasources.section.source-google-analytics-data-api.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-analytics-v4.credentials.oneOf.0.title=Authenticate via Google (Oauth) +datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key +datasources.section.source-google-analytics-v4.credentials.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-analytics-v4.credentials.title=Credentials +datasources.section.source-google-analytics-v4.custom_reports.title=Custom Reports (Optional) +datasources.section.source-google-analytics-v4.start_date.title=Replication Start Date +datasources.section.source-google-analytics-v4.view_id.title=View ID +datasources.section.source-google-analytics-v4.window_in_days.title=Data request time increment in days (Optional) +datasources.section.source-google-analytics-v4.credentials.description=Credentials for the service +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization +datasources.section.source-google-analytics-v4.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field. +datasources.section.source-google-analytics-v4.start_date.description=The date in the format YYYY-MM-DD. Any data before this date will not be replicated. +datasources.section.source-google-analytics-v4.view_id.description=The ID for the Google Analytics View you want to fetch data from. This can be found from the Google Analytics Account Explorer. +datasources.section.source-google-analytics-v4.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. 
More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364. +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-directory.credentials.oneOf.0.title=Sign in via Google (OAuth) +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.title=Credentials JSON +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.source-google-directory.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-google-directory.credentials.oneOf.1.title=Service Account Key +datasources.section.source-google-directory.credentials.title=Google Credentials +datasources.section.source-google-directory.credentials.description=Google APIs use the OAuth 2.0 protocol for authentication and authorization. The Source supports Web server application and Service accounts scenarios. +datasources.section.source-google-directory.credentials.oneOf.0.description=For this scenario, the user only needs to give permission to read Google Directory data. +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.description=The Client ID of the developer application. +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the developer application. +datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.description=Authentication Scenario +datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.description=The Token for obtaining a new access token. +datasources.section.source-google-directory.credentials.oneOf.1.description=For this scenario, the user should obtain the service account's credentials from the Google API Console and provide the delegated email. +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key. +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.description=Authentication Scenario +datasources.section.source-google-directory.credentials.oneOf.1.properties.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs. 
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-search-console.authorization.oneOf.0.title=OAuth +datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.title=Admin Email +datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.title=Service Account JSON Key +datasources.section.source-google-search-console.authorization.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-search-console.authorization.title=Authentication Type +datasources.section.source-google-search-console.end_date.title=End Date +datasources.section.source-google-search-console.site_urls.title=Website URL Property +datasources.section.source-google-search-console.start_date.title=Start Date +datasources.section.source-google-search-console.authorization.description= +datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.description=Access token for making authenticated requests. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.description=The client ID of your Google Search Console developer application. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.description=The client secret of your Google Search Console developer application. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs. +datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.description=The JSON key of the service account to use for authorization. Read more here. +datasources.section.source-google-search-console.end_date.description=UTC date in the format 2017-01-25. Any data after this date will not be replicated. Must be greater or equal to the start date field. +datasources.section.source-google-search-console.site_urls.description=The URLs of the website property attached to your GSC account. Read more here. +datasources.section.source-google-search-console.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-google-workspace-admin-reports.credentials_json.title=Credentials JSON +datasources.section.source-google-workspace-admin-reports.email.title=Email +datasources.section.source-google-workspace-admin-reports.lookback.title=Lookback Window in Days +datasources.section.source-google-workspace-admin-reports.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key. 
+datasources.section.source-google-workspace-admin-reports.email.description=The email of the user, who has permissions to access the Google Workspace Admin APIs. +datasources.section.source-google-workspace-admin-reports.lookback.description=Sets the range of time shown in the report. The maximum value allowed by the Google API is 180 days. +datasources.section.source-greenhouse.api_key.title=API Key +datasources.section.source-greenhouse.api_key.description=Greenhouse API Key. See the docs for more information on how to generate this key. +datasources.section.source-harvest.account_id.title=Account ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-harvest.credentials.oneOf.0.title=Authenticate via Harvest (OAuth) +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.title=Personal Access Token +datasources.section.source-harvest.credentials.oneOf.1.title=Authenticate with Personal Access Token +datasources.section.source-harvest.credentials.title=Authentication mechanism +datasources.section.source-harvest.replication_start_date.title=Start Date +datasources.section.source-harvest.account_id.description=Harvest account ID. Required for all Harvest requests in pair with Personal Access Token +datasources.section.source-harvest.credentials.description=Choose how to authenticate to Harvest. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.description=The Client ID of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.description=Log into Harvest and then create new personal access token. +datasources.section.source-harvest.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-hellobaton.api_key.description=authentication key required to access the api endpoints +datasources.section.source-hellobaton.company.description=Company name that generates your base api url +datasources.section.source-hubplanner.api_key.description=Hubplanner API key. See https://github.com/hubplanner/API#authentication for more details. +datasources.section.source-instagram.access_token.title=Access Token +datasources.section.source-instagram.start_date.title=Start Date +datasources.section.source-instagram.access_token.description=The value of the access token generated. See the docs for more information +datasources.section.source-instagram.start_date.description=The date from which you'd like to replicate data for User Insights, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-intercom.access_token.title=Access token +datasources.section.source-intercom.start_date.title=Start date +datasources.section.source-intercom.access_token.description=Access token for making authenticated requests. See the Intercom docs for more information. 
+datasources.section.source-intercom.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-iterable.api_key.title=API Key +datasources.section.source-iterable.start_date.title=Start Date +datasources.section.source-iterable.api_key.description=Iterable API Key. See the docs for more information on how to obtain this key. +datasources.section.source-iterable.start_date.description=The date from which you'd like to replicate data for Iterable, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-jdbc.jdbc_url.title=JDBC URL +datasources.section.source-jdbc.password.title=Password +datasources.section.source-jdbc.username.title=Username +datasources.section.source-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.source-jdbc.password.description=The password associated with this username. +datasources.section.source-jdbc.username.description=The username which is used to access the database. +datasources.section.source-jira.additional_fields.title=Additional Fields +datasources.section.source-jira.api_token.title=API Token +datasources.section.source-jira.domain.title=Domain +datasources.section.source-jira.email.title=Email +datasources.section.source-jira.enable_experimental_streams.title=Enable Experimental Streams +datasources.section.source-jira.expand_issue_changelog.title=Expand Issue Changelog +datasources.section.source-jira.projects.title=Projects +datasources.section.source-jira.render_fields.title=Render Issue Fields +datasources.section.source-jira.start_date.title=Start Date +datasources.section.source-jira.additional_fields.description=List of additional fields to include in replicating issues. +datasources.section.source-jira.api_token.description=Jira API Token. See the docs for more information on how to generate this key. +datasources.section.source-jira.domain.description=The Domain for your Jira account, e.g. airbyteio.atlassian.net +datasources.section.source-jira.email.description=The user email for your Jira account. +datasources.section.source-jira.enable_experimental_streams.description=Allow the use of experimental streams which rely on undocumented Jira API endpoints. See https://docs.airbyte.io/integrations/sources/jira#experimental-tables for more info. +datasources.section.source-jira.expand_issue_changelog.description=Expand the changelog when replicating issues. +datasources.section.source-jira.projects.description=List of Jira project keys to replicate data for. +datasources.section.source-jira.render_fields.description=Render issue fields in HTML format in addition to Jira JSON-like format. +datasources.section.source-jira.start_date.description=The date from which you'd like to replicate data for Jira in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only in the following incremental streams: issues. 
+datasources.section.source-kafka.auto_commit_interval_ms.title=Auto Commit Interval, ms +datasources.section.source-kafka.auto_offset_reset.title=Auto Offset Reset +datasources.section.source-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.source-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.source-kafka.client_id.title=Client ID +datasources.section.source-kafka.enable_auto_commit.title=Enable Auto Commit +datasources.section.source-kafka.group_id.title=Group ID +datasources.section.source-kafka.max_poll_records.title=Max Poll Records +datasources.section.source-kafka.polling_time.title=Polling Time +datasources.section.source-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.source-kafka.protocol.title=Protocol +datasources.section.source-kafka.receive_buffer_bytes.title=Receive Buffer, bytes +datasources.section.source-kafka.repeated_calls.title=Repeated Calls +datasources.section.source-kafka.request_timeout_ms.title=Request Timeout, ms +datasources.section.source-kafka.retry_backoff_ms.title=Retry Backoff, ms +datasources.section.source-kafka.subscription.oneOf.0.properties.topic_partitions.title=List of topic:partition Pairs +datasources.section.source-kafka.subscription.oneOf.0.title=Manually assign a list of partitions +datasources.section.source-kafka.subscription.oneOf.1.properties.topic_pattern.title=Topic Pattern +datasources.section.source-kafka.subscription.oneOf.1.title=Subscribe to all topics matching specified pattern +datasources.section.source-kafka.subscription.title=Subscription Method +datasources.section.source-kafka.test_topic.title=Test Topic +datasources.section.source-kafka.auto_commit_interval_ms.description=The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true. +datasources.section.source-kafka.auto_offset_reset.description=What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer. +datasources.section.source-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background. +datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups. +datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and return them incrementally from each poll. +datasources.section.source-kafka.polling_time.description=The amount of time the Kafka connector should try to poll for messages. +datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received. +datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses, the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching a specified pattern to get dynamically assigned partitions. +datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one). +datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read. +datasources.section.source-kafka.test_topic.description=The topic to test whether Airbyte can consume messages. +datasources.section.source-klaviyo.api_key.title=API Key +datasources.section.source-klaviyo.start_date.title=Start Date +datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key. +datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-kustomer-singer.api_token.title=API Token +datasources.section.source-kustomer-singer.start_date.title=Start Date +datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this token. +datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data. +datasources.section.source-kyriba.domain.title=Domain +datasources.section.source-kyriba.end_date.title=End Date +datasources.section.source-kyriba.password.title=Password +datasources.section.source-kyriba.start_date.title=Start Date +datasources.section.source-kyriba.username.title=Username +datasources.section.source-kyriba.domain.description=Kyriba domain. +datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run up to the current date. +datasources.section.source-kyriba.password.description=Password to be used in basic auth. +datasources.section.source-kyriba.start_date.description=The date the sync should start from. +datasources.section.source-kyriba.username.description=Username to be used in basic auth. +datasources.section.source-lemlist.api_key.title=API key +datasources.section.source-lemlist.api_key.description=Lemlist API key. +datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional) +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token +datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token +datasources.section.source-linkedin-ads.credentials.title=Authentication * +datasources.section.source-linkedin-ads.start_date.title=Start date +datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs, separated by a space, to pull the data from. Leave empty if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.
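The Kafka entries above spell out the bootstrap server format (host1:port1,host2:port2,...) and the two subscription methods. Below is a hedged sketch of what a matching config fragment could look like; the field nesting and every value are assumptions for illustration, not taken from the actual Airbyte Kafka spec, and it only uses `jq` (already used elsewhere in this repo) to pretty-print the JSON:

```
# Illustrative Kafka source values only; hostnames, topic pattern and field
# nesting are invented for this example.
cat <<'EOF' | jq .
{
  "bootstrap_servers": "kafka-1.internal:9092,kafka-2.internal:9092",
  "subscription": { "topic_pattern": "orders.*" },
  "test_topic": "airbyte-connection-test"
}
EOF
```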
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours. +datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token +datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token +datasources.section.source-linkedin-pages.credentials.title=Authentication * +datasources.section.source-linkedin-pages.org_id.title=Organization ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID. +datasources.section.source-linnworks.application_id.title=Application ID +datasources.section.source-linnworks.application_secret.title=Application Secret +datasources.section.source-linnworks.start_date.title=Start Date +datasources.section.source-linnworks.token.title=API Token +datasources.section.source-linnworks.application_id.description=Linnworks Application ID +datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret +datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-looker.client_id.title=Client ID +datasources.section.source-looker.client_secret.title=Client Secret +datasources.section.source-looker.domain.title=Domain +datasources.section.source-looker.run_look_ids.title=Look IDs to Run +datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key. +datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key.
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com,looker.[clientname].com,IP address +datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional) +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key +datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key +datasources.section.source-mailchimp.credentials.title=Authentication * +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key. +datasources.section.source-mailgun.domain_region.title=Domain Region Code +datasources.section.source-mailgun.private_key.title=Private API Key +datasources.section.source-mailgun.start_date.title=Replication Start Date +datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'. +datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data. +datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago. +datasources.section.source-marketo.client_id.title=Client ID +datasources.section.source-marketo.client_secret.title=Client Secret +datasources.section.source-marketo.domain_url.title=Domain URL +datasources.section.source-marketo.start_date.title=Start Date +datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this. +datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID +datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0) +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID +datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft +datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism +datasources.section.source-microsoft-teams.period.title=Period +datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL. +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL. +datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.
+datasources.section.source-mixpanel.api_secret.title=Project Secret +datasources.section.source-mixpanel.attribution_window.title=Attribution Window +datasources.section.source-mixpanel.date_window_size.title=Date slicing window +datasources.section.source-mixpanel.end_date.title=End Date +datasources.section.source-mixpanel.project_timezone.title=Project Timezone +datasources.section.source-mixpanel.region.title=Region +datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default +datasources.section.source-mixpanel.start_date.title=Start Date +datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this. +datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days. +datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days that is used to slice through data. You can reduce it if the amount of data in each window is too big for your environment. +datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date. +datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console. +datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU. +datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise, new properties will be ignored. +datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default. +datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional) +datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-monday.credentials.oneOf.1.title=API Token +datasources.section.source-monday.credentials.title=Authorization Method +datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests. +datasources.section.source-mongodb.auth_source.title=Authentication source +datasources.section.source-mongodb.database.title=Database name +datasources.section.source-mongodb.host.title=Host +datasources.section.source-mongodb.password.title=Password +datasources.section.source-mongodb.port.title=Port +datasources.section.source-mongodb.replica_set.title=Replica Set +datasources.section.source-mongodb.ssl.title=TLS connection +datasources.section.source-mongodb.user.title=User +datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info. +datasources.section.source-mongodb.database.description=Database to be replicated. +datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated. +datasources.section.source-mongodb.password.description=Password +datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated. +datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info. +datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB. +datasources.section.source-mongodb.user.description=User +datasources.section.source-mongodb-v2.auth_source.title=Authentication Source +datasources.section.source-mongodb-v2.database.title=Database Name +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses +datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type +datasources.section.source-mongodb-v2.password.title=Password +datasources.section.source-mongodb-v2.user.title=User +datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored. +datasources.section.source-mongodb-v2.database.description=The database you want to replicate. +datasources.section.source-mongodb-v2.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database. 
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by comma. +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to. +datasources.section.source-mongodb-v2.password.description=The password associated with this username. +datasources.section.source-mongodb-v2.user.description=The username which is used to access the database. +datasources.section.source-mssql.database.title=Database +datasources.section.source-mssql.host.title=Host +datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mssql.password.title=Password +datasources.section.source-mssql.port.title=Port +datasources.section.source-mssql.replication_method.oneOf.0.title=Standard +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level +datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mssql.replication_method.title=Replication Method +datasources.section.source-mssql.schemas.title=Schemas +datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.source-mssql.ssl_method.title=SSL Method +datasources.section.source-mssql.username.title=Username +datasources.section.source-mssql.database.description=The name of the database. +datasources.section.source-mssql.host.description=The hostname of the database. +datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mssql.password.description=The password associated with the username. +datasources.section.source-mssql.port.description=The port of the database. +datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC. +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database. +datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database. +datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.source-mssql.username.description=The username which is used to access the database. 
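For the MongoDB replica set option described earlier, server_addresses is a comma-separated list of host:port members plus a replica set name. A small sketch with invented hostnames:

```
# Hypothetical replica set values matching the server_addresses format
# described above (each member as host:port, comma separated).
SERVER_ADDRESSES="mongo-0.internal:27017,mongo-1.internal:27017,mongo-2.internal:27017"
REPLICA_SET="rs0"
echo "replica set $REPLICA_SET -> $SERVER_ADDRESSES"
```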
+datasources.section.source-my-hours.email.title=Email +datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size +datasources.section.source-my-hours.password.title=Password +datasources.section.source-my-hours.start_date.title=Start Date +datasources.section.source-my-hours.email.description=Your My Hours username +datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days +datasources.section.source-my-hours.password.description=The password associated to the username +datasources.section.source-my-hours.start_date.description=Start date for collecting time logs +datasources.section.source-mysql.database.title=Database +datasources.section.source-mysql.host.title=Host +datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mysql.password.title=Password +datasources.section.source-mysql.port.title=Port +datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD +datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mysql.replication_method.title=Replication Method +datasources.section.source-mysql.ssl.title=SSL Connection +datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred +datasources.section.source-mysql.ssl_mode.oneOf.1.title=required +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity +datasources.section.source-mysql.ssl_mode.title=SSL modes +datasources.section.source-mysql.username.title=Username +datasources.section.source-mysql.database.description=The database name. +datasources.section.source-mysql.host.description=The host name of the database. +datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mysql.password.description=The password associated with the username. +datasources.section.source-mysql.port.description=The port to connect to. +datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database. +datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
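The jdbc_url_params fields above (MS SQL, MySQL, and the other JDBC-based sources) take key=value pairs joined by '&'. A small sketch that assembles such a string; the parameter names are examples only, not a recommendation for any particular driver:

```
# Join key=value pairs with '&' as the jdbc_url_params descriptions above require.
PARAMS=("connectTimeout=30000" "socketTimeout=60000" "useUnicode=true")
JDBC_URL_PARAMS=$(IFS='&'; echo "${PARAMS[*]}")
echo "$JDBC_URL_PARAMS"   # connectTimeout=30000&socketTimeout=60000&useUnicode=true
```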
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL. +datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
  • preferred - Automatically attempt SSL connection. If the MySQL server does not support SSL, continue with a regular connection.
  • required - Always connect with SSL. If the MySQL server doesn’t support SSL, the connection will not be established. Certificate Authority (CA) and Hostname are not verified.
  • verify-ca - Always connect with SSL. Verifies CA, but allows connection even if Hostname does not match.
  • Verify Identity - Always connect with SSL. Verify both CA and Hostname.
  • Read more in the docs. +datasources.section.source-mysql.ssl_mode.oneOf.0.description=Preferred SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.1.description=Require SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.description=Verify CA SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.source-mysql.ssl_mode.oneOf.3.description=Verify-full SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.source-mysql.username.description=The username which is used to access the database. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-notion.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-notion.credentials.oneOf.1.properties.token.title=Access Token +datasources.section.source-notion.credentials.oneOf.1.title=Access Token +datasources.section.source-notion.credentials.title=Authenticate using +datasources.section.source-notion.start_date.title=Start Date +datasources.section.source-notion.credentials.description=Pick an authentication method. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.description=Access Token is a token you received by completing the OAuth web flow of Notion. +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.description=The Client ID of your Notion integration. +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Notion integration. +datasources.section.source-notion.credentials.oneOf.1.properties.token.description=Notion API access token, see the docs for more information on how to obtain this token. +datasources.section.source-notion.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
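To make the MySQL ssl_mode options above concrete, here is a hedged sketch of a Verify CA-style fragment; the exact field layout (in particular the `mode` key) is an assumption and the certificate content is a placeholder:

```
# Illustrative Verify CA values only; field names are assumed, the certificate
# body is a placeholder.
cat <<'EOF' | jq .
{
  "ssl": true,
  "ssl_mode": {
    "mode": "verify_ca",
    "ca_certificate": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"
  }
}
EOF
```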
+datasources.section.source-okta.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-okta.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-okta.credentials.oneOf.1.title=API Token +datasources.section.source-okta.credentials.title=Authorization Method * +datasources.section.source-okta.domain.title=Okta domain +datasources.section.source-okta.start_date.title=Start Date +datasources.section.source-okta.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain new Access Token, when it's expired. +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.description=An Okta token. See the docs for instructions on how to generate it. +datasources.section.source-okta.domain.description=The Okta domain. See the docs for instructions on how to find it. +datasources.section.source-okta.start_date.description=UTC date and time in the format YYYY-MM-DDTHH:MM:SSZ. Any data before this date will not be replicated. +datasources.section.source-onesignal.outcome_names.title=Outcome Names +datasources.section.source-onesignal.start_date.title=Start Date +datasources.section.source-onesignal.user_auth_key.title=User Auth Key +datasources.section.source-onesignal.outcome_names.description=Comma-separated list of names and the value (sum/count) for the returned outcome data. See the docs for more details +datasources.section.source-onesignal.start_date.description=The date from which you'd like to replicate data for OneSignal API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-onesignal.user_auth_key.description=OneSignal User Auth Key, see the docs for more information on how to obtain this key. +datasources.section.source-openweather.appid.title=App ID +datasources.section.source-openweather.lang.title=Language +datasources.section.source-openweather.lat.title=Latitude +datasources.section.source-openweather.lon.title=Longitude +datasources.section.source-openweather.units.title=Units +datasources.section.source-openweather.appid.description=Your OpenWeather API Key. See here. The key is case sensitive. +datasources.section.source-openweather.lang.description=You can use lang parameter to get the output in your language. The contents of the description field will be translated. See here for the list of supported languages. +datasources.section.source-openweather.lat.description=Latitude for which you want to get weather condition from. (min -90, max 90) +datasources.section.source-openweather.lon.description=Longitude for which you want to get weather condition from. (min -180, max 180) +datasources.section.source-openweather.units.description=Units of measurement. standard, metric and imperial units are available. If you do not use the units parameter, standard units will be applied by default. 
+datasources.section.source-oracle.connection_data.oneOf.0.properties.service_name.title=Service name +datasources.section.source-oracle.connection_data.oneOf.0.title=Service name +datasources.section.source-oracle.connection_data.oneOf.1.properties.sid.title=System ID (SID) +datasources.section.source-oracle.connection_data.oneOf.1.title=System ID (SID) +datasources.section.source-oracle.connection_data.title=Connect by +datasources.section.source-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.source-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM File +datasources.section.source-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.source-oracle.encryption.title=Encryption +datasources.section.source-oracle.host.title=Host +datasources.section.source-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.source-oracle.password.title=Password +datasources.section.source-oracle.port.title=Port +datasources.section.source-oracle.schemas.title=Schemas +datasources.section.source-oracle.username.title=User +datasources.section.source-oracle.connection_data.description=Connection data that will be used for the DB connection. +datasources.section.source-oracle.connection_data.oneOf.0.description=Use service name +datasources.section.source-oracle.connection_data.oneOf.1.description=Use SID (Oracle System Identifier) +datasources.section.source-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.source-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines what encryption algorithm is used. +datasources.section.source-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.source-oracle.host.description=Hostname of the database. +datasources.section.source-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-oracle.password.description=The password associated with the username. +datasources.section.source-oracle.port.description=Port of the database. +datasources.section.source-oracle.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-oracle.username.description=The username which is used to access the database.
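The Oracle "Connect by" choice above is either a service name or a SID. A hedged sketch of the two variants; the key names are inferred from the titles above and the values are invented:

```
# Two illustrative connection_data variants (service name vs. SID).
cat <<'EOF' | jq -c '.[]'
[
  { "connection_data": { "service_name": "ORCLPDB1" } },
  { "connection_data": { "sid": "ORCL" } }
]
EOF
```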
+datasources.section.source-orb.api_key.title=Orb API Key +datasources.section.source-orb.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-orb.numeric_event_properties_keys.title=Event properties keys (numeric values) +datasources.section.source-orb.start_date.title=Start Date +datasources.section.source-orb.string_event_properties_keys.title=Event properties keys (string values) +datasources.section.source-orb.api_key.description=Orb API Key, issued from the Orb admin console. +datasources.section.source-orb.lookback_window_days.description=When set to N, the connector will always refresh resources created within the past N days. By default, updated objects that are not newly created are not incrementally synced. +datasources.section.source-orb.numeric_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-orb.start_date.description=UTC date and time in the format 2022-03-01T00:00:00Z. Any data with created_at before this date will not be synced. +datasources.section.source-orb.string_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-outreach.client_id.title=Client ID +datasources.section.source-outreach.client_secret.title=Client Secret +datasources.section.source-outreach.redirect_uri.title=Redirect URI +datasources.section.source-outreach.refresh_token.title=Refresh Token +datasources.section.source-outreach.start_date.title=Start Date +datasources.section.source-outreach.client_id.description=The Client ID of your Outreach developer application. +datasources.section.source-outreach.client_secret.description=The Client Secret of your Outreach developer application. +datasources.section.source-outreach.redirect_uri.description=A Redirect URI is the location where the authorization server sends the user once the app has been successfully authorized and granted an authorization code or access token. +datasources.section.source-outreach.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-outreach.start_date.description=The date from which you'd like to replicate data for Outreach API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-pardot.client_id.description=The Consumer Key that can be found when viewing your app in Salesforce. +datasources.section.source-pardot.client_secret.description=The Consumer Secret that can be found when viewing your app in Salesforce. +datasources.section.source-pardot.is_sandbox.description=Whether or not the app is in a Salesforce sandbox. If you do not know what this is, assume it is false. +datasources.section.source-pardot.pardot_business_unit_id.description=Pardot Business Unit ID, which can be found at Setup > Pardot > Pardot Account Setup. +datasources.section.source-pardot.refresh_token.description=Salesforce Refresh Token used for Airbyte to access your Salesforce account. If you don't know what this is, follow this guide to retrieve it. +datasources.section.source-pardot.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
Leave blank to skip this filter +datasources.section.source-paypal-transaction.client_id.title=Client ID +datasources.section.source-paypal-transaction.client_secret.title=Client secret +datasources.section.source-paypal-transaction.is_sandbox.title=Sandbox +datasources.section.source-paypal-transaction.refresh_token.title=Refresh token (Optional) +datasources.section.source-paypal-transaction.start_date.title=Start Date +datasources.section.source-paypal-transaction.client_id.description=The Client ID of your Paypal developer application. +datasources.section.source-paypal-transaction.client_secret.description=The Client Secret of your Paypal developer application. +datasources.section.source-paypal-transaction.is_sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-paypal-transaction.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-paypal-transaction.start_date.description=Start Date for data extraction in ISO format. Date must be in range from 3 years till 12 hrs before present time. +datasources.section.source-paystack.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-paystack.secret_key.title=Secret Key +datasources.section.source-paystack.start_date.title=Start Date +datasources.section.source-paystack.lookback_window_days.description=When set, the connector will always reload data from the past N days, where N is the value set here. This is useful if your data is updated after creation. +datasources.section.source-paystack.secret_key.description=The Paystack API key (usually starts with 'sk_live_'; find yours here). +datasources.section.source-paystack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-persistiq.api_key.description=PersistIq API Key. See the docs for more information on where to find that key. +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pinterest.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-pinterest.credentials.oneOf.1.title=Access Token +datasources.section.source-pinterest.credentials.title=Authorization Method +datasources.section.source-pinterest.start_date.title=Start Date +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain new Access Token, when it's expired. +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests. +datasources.section.source-pinterest.start_date.description=A date in the format YYYY-MM-DD. If you have not set a date, it would be defaulted to 2020-07-28. 
+datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pipedrive.authorization.oneOf.0.title=Sign in via Pipedrive (OAuth) +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.title=API Token +datasources.section.source-pipedrive.authorization.oneOf.1.title=API Key Authentication +datasources.section.source-pipedrive.authorization.title=Authentication Type +datasources.section.source-pipedrive.replication_start_date.title=Start Date +datasources.section.source-pipedrive.authorization.description=Choose one of the possible authorization methods. +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.description=The Client ID of your Pipedrive developer application. +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.description=The Client Secret of your Pipedrive developer application. +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.description=The Pipedrive API Token. +datasources.section.source-pipedrive.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. When specified and not None, the stream will behave as incremental. +datasources.section.source-pivotal-tracker.api_token.description=Pivotal Tracker API token. +datasources.section.source-plaid.access_token.title=Access Token +datasources.section.source-plaid.api_key.title=API Key +datasources.section.source-plaid.client_id.title=Client ID +datasources.section.source-plaid.plaid_env.title=Plaid Environment +datasources.section.source-plaid.start_date.title=Start Date +datasources.section.source-plaid.access_token.description=The end-user's Link access token. +datasources.section.source-plaid.api_key.description=The Plaid API key to use to hit the API. +datasources.section.source-plaid.client_id.description=The Plaid client ID. +datasources.section.source-plaid.plaid_env.description=The Plaid environment. +datasources.section.source-plaid.start_date.description=The date from which you'd like to replicate data for Plaid in the format YYYY-MM-DD. All data generated after this date will be replicated. +datasources.section.source-pokeapi.pokemon_name.title=Pokemon Name +datasources.section.source-pokeapi.pokemon_name.description=Pokemon requested from the API.
+datasources.section.source-postgres.database.title=Database Name +datasources.section.source-postgres.host.title=Host +datasources.section.source-postgres.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-postgres.password.title=Password +datasources.section.source-postgres.port.title=Port +datasources.section.source-postgres.replication_method.oneOf.0.title=Standard +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.title=Initial Waiting Time in Seconds (Advanced) +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.title=Plugin +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.title=Publication +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.title=Replication Slot +datasources.section.source-postgres.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-postgres.replication_method.title=Replication Method +datasources.section.source-postgres.schemas.title=Schemas +datasources.section.source-postgres.ssl.title=Connect using SSL +datasources.section.source-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.source-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.source-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.source-postgres.ssl_mode.oneOf.3.title=require +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA Certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.source-postgres.ssl_mode.title=SSL Modes +datasources.section.source-postgres.username.title=Username +datasources.section.source-postgres.database.description=Name of the database. +datasources.section.source-postgres.host.description=Hostname of the database. +datasources.section.source-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-postgres.password.description=Password associated with the username. +datasources.section.source-postgres.port.description=Port of the database. +datasources.section.source-postgres.replication_method.description=Replication method for extracting data from the database. +datasources.section.source-postgres.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. 
+datasources.section.source-postgres.replication_method.oneOf.1.description=Logical replication uses the Postgres write-ahead log (WAL) to detect inserts, updates, and deletes. This needs to be configured on the source database itself. Only available on Postgres 10 and above. Read the docs. +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.description=The amount of time the connector will wait when it launches to determine if there is new data to sync or not. Defaults to 300 seconds. Valid range: 120 seconds to 1200 seconds. Read about initial waiting time. +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.description=A logical decoding plugin installed on the PostgreSQL server. The `pgoutput` plugin is used by default. If the replication table contains a lot of big jsonb values it is recommended to use `wal2json` plugin. Read more about selecting replication plugins. +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.description=A Postgres publication used for consuming changes. Read about publications and replication identities. +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.description=A plugin logical replication slot. Read about replication slots. +datasources.section.source-postgres.schemas.description=The list of schemas (case sensitive) to sync from. Defaults to public. +datasources.section.source-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.source-postgres.ssl_mode.description=SSL connection modes. +datasources.section.source-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.source-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. If you do not add it - the password will be generated automatically. +datasources.section.source-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. If you do not add it - the password will be generated automatically. +datasources.section.source-postgres.username.description=Username to access the database. +datasources.section.source-posthog.api_key.title=API Key +datasources.section.source-posthog.base_url.title=Base URL +datasources.section.source-posthog.start_date.title=Start Date +datasources.section.source-posthog.api_key.description=API Key. 
See the docs for information on how to generate this key. +datasources.section.source-posthog.base_url.description=Base PostHog url. Defaults to PostHog Cloud (https://app.posthog.com). +datasources.section.source-posthog.start_date.description=The date from which you'd like to replicate the data. Any data before this date will not be replicated. +datasources.section.source-prestashop.access_key.description=Your PrestaShop access key. See the docs for info on how to obtain this. +datasources.section.source-prestashop.url.description=Shop URL without trailing slash (domain name or IP address) +datasources.section.source-qualaroo.key.title=API key +datasources.section.source-qualaroo.start_date.title=Start Date +datasources.section.source-qualaroo.survey_ids.title=Qualaroo survey IDs +datasources.section.source-qualaroo.token.title=API token +datasources.section.source-qualaroo.key.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-qualaroo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-qualaroo.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated. +datasources.section.source-qualaroo.token.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-quickbooks-singer.client_id.title=Client ID +datasources.section.source-quickbooks-singer.client_secret.title=Client Secret +datasources.section.source-quickbooks-singer.realm_id.title=Realm ID +datasources.section.source-quickbooks-singer.refresh_token.title=Refresh Token +datasources.section.source-quickbooks-singer.sandbox.title=Sandbox +datasources.section.source-quickbooks-singer.start_date.title=Start Date +datasources.section.source-quickbooks-singer.user_agent.title=User Agent +datasources.section.source-quickbooks-singer.client_id.description=Identifies which app is making the request. Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.client_secret.description= Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.realm_id.description=Labeled Company ID. The Make API Calls panel is populated with the realm id and the current access token. +datasources.section.source-quickbooks-singer.refresh_token.description=A token used when refreshing the access token. +datasources.section.source-quickbooks-singer.sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-quickbooks-singer.start_date.description=The default value to use if no bookmark exists for an endpoint (rfc3339 date string). E.g, 2021-03-20T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-quickbooks-singer.user_agent.description=Process and email for API logging purposes. Example: tap-quickbooks . +datasources.section.source-recharge.access_token.title=Access Token +datasources.section.source-recharge.start_date.title=Start Date +datasources.section.source-recharge.access_token.description=The value of the Access Token generated. See the docs for more information. 
+datasources.section.source-recharge.start_date.description=The date from which you'd like to replicate data for Recharge API, in the format YYYY-MM-DDT00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-recurly.api_key.title=API Key +datasources.section.source-recurly.api_key.description=Recurly API Key. See the docs for more information on how to generate this key. +datasources.section.source-recurly.begin_time.description=ISO8601 timestamp from which the replication from Recurly API will start from. +datasources.section.source-recurly.end_time.description=ISO8601 timestamp to which the replication from Recurly API will stop. Records after that date won't be imported. +datasources.section.source-redshift.database.title=Database +datasources.section.source-redshift.host.title=Host +datasources.section.source-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.source-redshift.password.title=Password +datasources.section.source-redshift.port.title=Port +datasources.section.source-redshift.schemas.title=Schemas +datasources.section.source-redshift.username.title=Username +datasources.section.source-redshift.database.description=Name of the database. +datasources.section.source-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com). +datasources.section.source-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-redshift.password.description=Password associated with the username. +datasources.section.source-redshift.port.description=Port of the database. +datasources.section.source-redshift.schemas.description=The list of schemas to sync from. Specify one or more explicitly or keep empty to process all schemas. Schema names are case sensitive. +datasources.section.source-redshift.username.description=Username to use to access the database. +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-retently.credentials.oneOf.0.title=Authenticate via Retently (OAuth) +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.title=API Token +datasources.section.source-retently.credentials.oneOf.1.title=Authenticate with API Token +datasources.section.source-retently.credentials.title=Authentication Mechanism +datasources.section.source-retently.credentials.description=Choose how to authenticate to Retently +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.description=The Client ID of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.description=Retently Refresh Token which can be used to fetch new Bearer Tokens when the current one expires. +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.description=Retently API Token. See the docs for more information on how to obtain this key. 
+datasources.section.source-rki-covid.start_date.title=Start Date +datasources.section.source-rki-covid.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-s3.dataset.title=Output Stream Name +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.title=Additional Reader Options +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.title=Advanced Options +datasources.section.source-s3.format.oneOf.0.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.0.properties.delimiter.title=Delimiter +datasources.section.source-s3.format.oneOf.0.properties.double_quote.title=Double Quote +datasources.section.source-s3.format.oneOf.0.properties.encoding.title=Encoding +datasources.section.source-s3.format.oneOf.0.properties.escape_char.title=Escape Character +datasources.section.source-s3.format.oneOf.0.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.title=Infer Datatypes +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.0.properties.quote_char.title=Quote Character +datasources.section.source-s3.format.oneOf.0.title=CSV +datasources.section.source-s3.format.oneOf.1.properties.batch_size.title=Record batch size +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.title=Buffer Size +datasources.section.source-s3.format.oneOf.1.properties.columns.title=Selected Columns +datasources.section.source-s3.format.oneOf.1.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.1.title=Parquet +datasources.section.source-s3.format.oneOf.2.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.2.title=Avro +datasources.section.source-s3.format.oneOf.3.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.3.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.title=UnexpectedFieldBehaviorEnum +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.title=Unexpected field behavior +datasources.section.source-s3.format.oneOf.3.title=Jsonl +datasources.section.source-s3.format.title=File Format +datasources.section.source-s3.path_pattern.title=Pattern of files to replicate +datasources.section.source-s3.provider.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-s3.provider.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-s3.provider.properties.bucket.title=Bucket +datasources.section.source-s3.provider.properties.endpoint.title=Endpoint +datasources.section.source-s3.provider.properties.path_prefix.title=Path Prefix +datasources.section.source-s3.provider.properties.use_ssl.title=Use TLS +datasources.section.source-s3.provider.properties.verify_ssl_cert.title=Verify TLS Certificates +datasources.section.source-s3.provider.title=S3: Amazon Web Services +datasources.section.source-s3.schema.title=Manually enforced data schema (Optional) +datasources.section.source-s3.dataset.description=The name of the stream you would like this source to output. Can contain letters, numbers, or underscores. 
+datasources.section.source-s3.format.description=The format of the files you'd like to replicate +datasources.section.source-s3.format.oneOf.0.description=This connector utilises PyArrow (Apache Arrow) for CSV parsing. +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.description=Optionally add a valid JSON string here to provide additional options to the csv reader. Mappings must correspond to options detailed here. 'column_types' is used internally to handle schema so overriding that would likely cause problems. +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.description=Optionally add a valid JSON string here to provide additional Pyarrow ReadOptions. Specify 'column_names' here if your CSV doesn't have a header, or if you want to use custom column names. 'block_size' and 'encoding' are already used above; specifying them again here will override the values above. +datasources.section.source-s3.format.oneOf.0.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.0.properties.delimiter.description=The character delimiting individual cells in the CSV data. This may only be a 1-character string. For tab-delimited data enter '\t'. +datasources.section.source-s3.format.oneOf.0.properties.double_quote.description=Whether two quotes in a quoted CSV value denote a single quote in the data. +datasources.section.source-s3.format.oneOf.0.properties.encoding.description=The character encoding of the CSV data. Leave blank to default to UTF8. See list of python encodings for allowable options. +datasources.section.source-s3.format.oneOf.0.properties.escape_char.description=The character used for escaping special characters. To disallow escaping, leave this field blank. +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.description=Configures whether a schema for the source should be inferred from the current data or not. If set to false and a custom schema is set, then the manually enforced schema is used. If a schema is not manually set, and this is set to false, then all fields will be read as strings. +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.description=Whether newline characters are allowed in CSV values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.0.properties.quote_char.description=The character used for quoting CSV values. To disallow quoting, make this field blank. +datasources.section.source-s3.format.oneOf.1.description=This connector utilises PyArrow (Apache Arrow) for Parquet parsing. +datasources.section.source-s3.format.oneOf.1.properties.batch_size.description=Maximum number of records per batch read from the input files. Batches may be smaller if there aren’t enough rows in the file. This option can help avoid out-of-memory errors if your data is particularly wide. +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.description=Perform read buffering when deserializing individual column chunks. By default every group column will be loaded fully to memory. This option can help avoid out-of-memory errors if your data is particularly wide. 
+datasources.section.source-s3.format.oneOf.1.properties.columns.description=If you only want to sync a subset of the columns from the file(s), add the columns you want here as a comma-delimited list. Leave it empty to sync all columns. +datasources.section.source-s3.format.oneOf.2.description=This connector utilises fastavro for Avro parsing. +datasources.section.source-s3.format.oneOf.3.description=This connector uses PyArrow for JSON Lines (jsonl) file parsing. +datasources.section.source-s3.format.oneOf.3.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.description=Whether newline characters are allowed in JSON values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.description=An enumeration. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.description=How JSON fields outside of explicit_schema (if given) are treated. Check PyArrow documentation for details +datasources.section.source-s3.path_pattern.description=A regular expression which tells the connector which files to replicate. All files which match this pattern will be replicated. Use | to separate multiple patterns. See this page to understand pattern syntax (GLOBSTAR and SPLIT flags are enabled). Use pattern ** to pick up all files. +datasources.section.source-s3.provider.description=Use this to load files from S3 or S3-compatible services +datasources.section.source-s3.provider.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.bucket.description=Name of the S3 bucket where the file(s) exist. +datasources.section.source-s3.provider.properties.endpoint.description=Endpoint to an S3 compatible service. Leave empty to use AWS. +datasources.section.source-s3.provider.properties.path_prefix.description=By providing a path-like prefix (e.g. myFolder/thisTable/) under which all the relevant files sit, we can optimize finding these in S3. This is optional but recommended if your bucket contains many folders/files which you don't need to replicate. +datasources.section.source-s3.provider.properties.use_ssl.description=Whether the remote server is using a secure SSL/TLS connection. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.provider.properties.verify_ssl_cert.description=Set this to false to allow self signed certificates. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.schema.description=Optionally provide a schema to enforce, as a valid JSON string. Ensure this is a mapping of { "column" : "type" }, where types are valid JSON Schema datatypes. Leave as {} to auto-infer the schema. 
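+# Illustrative values only (filenames and columns below are assumptions, not part of the generated spec):
+# the source-s3 path_pattern above takes glob patterns separated by '|' (GLOBSTAR enabled), and schema
+# takes a JSON mapping of column names to JSON Schema types, for example:
+# path_pattern=csv_exports/**/*.csv|archive/2022_*.csv
+# schema={"id": "integer", "name": "string", "created_at": "string"}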
+datasources.section.source-salesloft.client_id.title=Client ID +datasources.section.source-salesloft.client_secret.title=Client Secret +datasources.section.source-salesloft.refresh_token.title=Refresh Token +datasources.section.source-salesloft.start_date.title=Start Date +datasources.section.source-salesloft.client_id.description=The Client ID of your Salesloft developer application. +datasources.section.source-salesloft.client_secret.description=The Client Secret of your Salesloft developer application. +datasources.section.source-salesloft.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-salesloft.start_date.description=The date from which you'd like to replicate data for Salesloft API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-search-metrics.api_key.title=API Key +datasources.section.source-search-metrics.client_secret.title=Client Secret +datasources.section.source-search-metrics.country_code.title=Country Code +datasources.section.source-search-metrics.start_date.title=Start Date +datasources.section.source-search-metrics.api_key.description= +datasources.section.source-search-metrics.client_secret.description= +datasources.section.source-search-metrics.country_code.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.source-search-metrics.start_date.description=Data generated in SearchMetrics after this date will be replicated. This date must be specified in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-sendgrid.apikey.title=Sendgrid API key +datasources.section.source-sendgrid.start_time.title=Start time +datasources.section.source-sendgrid.apikey.description=API Key, use admin to generate this key. +datasources.section.source-sendgrid.start_time.description=Start time in timestamp integer format. Any data before this timestamp will not be replicated. +datasources.section.source-sentry.auth_token.title=Authentication Tokens +datasources.section.source-sentry.hostname.title=Host Name +datasources.section.source-sentry.organization.title=Organization +datasources.section.source-sentry.project.title=Project +datasources.section.source-sentry.auth_token.description=Log into Sentry and then create authentication tokens. For self-hosted, you can find or create authentication tokens by visiting "{instance_url_prefix}/settings/account/api/auth-tokens/" +datasources.section.source-sentry.hostname.description=Host name of Sentry API server. For self-hosted, specify your host name here. Otherwise, leave it empty. +datasources.section.source-sentry.organization.description=The slug of the organization the groups belong to. +datasources.section.source-sentry.project.description=The name (slug) of the Project you want to sync. 
+datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.title=Password +datasources.section.source-sftp.credentials.oneOf.0.title=Password Authentication +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.title=SSH Private Key +datasources.section.source-sftp.credentials.oneOf.1.title=SSH Key Authentication +datasources.section.source-sftp.credentials.title=Authentication * +datasources.section.source-sftp.file_pattern.title=File Pattern (Optional) +datasources.section.source-sftp.file_types.title=File types +datasources.section.source-sftp.folder_path.title=Folder Path (Optional) +datasources.section.source-sftp.host.title=Host Address +datasources.section.source-sftp.port.title=Port +datasources.section.source-sftp.user.title=User Name +datasources.section.source-sftp.credentials.description=The server authentication method +datasources.section.source-sftp.credentials.oneOf.0.properties.auth_method.description=Connect through password authentication +datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.description=OS-level password for logging into the jump server host +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_method.description=Connect through ssh key +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.description=OS-level user account ssh key credentials in RSA PEM format (created with ssh-keygen -t rsa -m PEM -f myuser_rsa) +datasources.section.source-sftp.file_pattern.description=The regular expression to specify files for sync in a chosen Folder Path +datasources.section.source-sftp.file_types.description=Comma-separated file types. Currently only 'csv' and 'json' types are supported. +datasources.section.source-sftp.folder_path.description=The directory to search files for sync +datasources.section.source-sftp.host.description=The server host address +datasources.section.source-sftp.port.description=The server port +datasources.section.source-sftp.user.description=The server user +datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-shopify.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.title=API Password +datasources.section.source-shopify.credentials.oneOf.1.title=API Password +datasources.section.source-shopify.credentials.title=Shopify Authorization Method +datasources.section.source-shopify.shop.title=Shopify Store +datasources.section.source-shopify.start_date.title=Replication Start Date +datasources.section.source-shopify.credentials.description=The authorization method to use to retrieve data from Shopify +datasources.section.source-shopify.credentials.oneOf.0.description=OAuth2.0 +datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.description=The Access Token for making authenticated requests. +datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.description=The Client ID of the Shopify developer application. +datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the Shopify developer application. 
+datasources.section.source-shopify.credentials.oneOf.1.description=API Password Auth +datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.description=The API Password for your private application in the `Shopify` store. +datasources.section.source-shopify.shop.description=The name of your Shopify store found in the URL. For example, if your URL was https://NAME.myshopify.com, then the name would be 'NAME'. +datasources.section.source-shopify.start_date.description=The date you would like to replicate data from. Format: YYYY-MM-DD. Any data before this date will not be replicated. +datasources.section.source-shortio.domain_id.title=Domain ID +datasources.section.source-shortio.secret_key.title=Secret Key +datasources.section.source-shortio.start_date.title=Start Date +datasources.section.source-shortio.secret_key.description=Short.io Secret Key +datasources.section.source-shortio.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-slack.channel_filter.title=Channel name filter +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.title=Access token +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-slack.credentials.oneOf.0.title=Sign in via Slack (OAuth) +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-slack.credentials.oneOf.1.title=API Token +datasources.section.source-slack.credentials.title=Authentication mechanism +datasources.section.source-slack.join_channels.title=Join all channels +datasources.section.source-slack.lookback_window.title=Threads Lookback window (Days) +datasources.section.source-slack.start_date.title=Start Date +datasources.section.source-slack.channel_filter.description=A channel name list (without leading '#' char) which limit the channels from which you'd like to sync. Empty list means no filter. +datasources.section.source-slack.credentials.description=Choose how to authenticate into Slack +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.description=Slack access_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.description=Slack client_id. See our docs if you need help finding this id. +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.description=Slack client_secret. See our docs if you need help finding this secret. +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.description=Slack refresh_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.description=A Slack bot token. See the docs for instructions on how to generate it. +datasources.section.source-slack.join_channels.description=Whether to join all channels or to sync data only from channels the bot is already in. If false, you'll need to manually add the bot to all the channels from which you'd like to sync messages. +datasources.section.source-slack.lookback_window.description=How far into the past to look for messages in threads. 
+datasources.section.source-slack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-smartsheets.access_token.title=Access Token +datasources.section.source-smartsheets.spreadsheet_id.title=Sheet ID +datasources.section.source-smartsheets.start_datetime.title=Start Datetime (Optional) +datasources.section.source-smartsheets.access_token.description=The access token to use for accessing your data from Smartsheets. This access token must be generated by a user with at least read access to the data you'd like to replicate. Generate an access token in the Smartsheets main menu by clicking Account > Apps & Integrations > API Access. See the setup guide for information on how to obtain this token. +datasources.section.source-smartsheets.spreadsheet_id.description=The spreadsheet ID. Find it by opening the spreadsheet then navigating to File > Properties +datasources.section.source-smartsheets.start_datetime.description=Only rows modified after this date/time will be replicated. This should be an ISO 8601 string, for instance: `2000-01-01T13:00:00` +datasources.section.source-snapchat-marketing.client_id.title=Client ID +datasources.section.source-snapchat-marketing.client_secret.title=Client Secret +datasources.section.source-snapchat-marketing.end_date.title=End Date (Optional) +datasources.section.source-snapchat-marketing.refresh_token.title=Refresh Token +datasources.section.source-snapchat-marketing.start_date.title=Start Date +datasources.section.source-snapchat-marketing.client_id.description=The Client ID of your Snapchat developer application. +datasources.section.source-snapchat-marketing.client_secret.description=The Client Secret of your Snapchat developer application. +datasources.section.source-snapchat-marketing.end_date.description=Date in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-snapchat-marketing.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-snapchat-marketing.start_date.description=Date in the format 2022-01-01. Any data before this date will not be replicated. 
+datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.title=Password +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.title=Username +datasources.section.source-snowflake.credentials.oneOf.1.title=Username and Password +datasources.section.source-snowflake.credentials.title=Authorization Method +datasources.section.source-snowflake.database.title=Database +datasources.section.source-snowflake.host.title=Account Name +datasources.section.source-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.source-snowflake.role.title=Role +datasources.section.source-snowflake.schema.title=Schema +datasources.section.source-snowflake.warehouse.title=Warehouse +datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.description=The Client ID of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.description=Refresh Token for making authenticated requests. +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.description=The password associated with the username. +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.description=The username you created to allow Airbyte to access the database. +datasources.section.source-snowflake.database.description=The database you created for Airbyte to access data. +datasources.section.source-snowflake.host.description=The host domain of the snowflake instance (must include the account, region, cloud environment, and end with snowflakecomputing.com). +datasources.section.source-snowflake.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-snowflake.role.description=The role you created for Airbyte to access Snowflake. +datasources.section.source-snowflake.schema.description=The source Snowflake schema tables. +datasources.section.source-snowflake.warehouse.description=The warehouse you created for Airbyte to access data. 
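+# Illustrative values only (the account name and JDBC parameters below are assumptions): the
+# source-snowflake host must include the account, region and cloud environment and end with
+# snowflakecomputing.com, and jdbc_url_params is an '&'-separated list of key=value pairs, for example:
+# host=acme_account.us-east-2.aws.snowflakecomputing.com
+# jdbc_url_params=queryTimeout=60&application=airbyte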
+datasources.section.source-square.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-square.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-square.credentials.oneOf.0.title=Oauth authentication +datasources.section.source-square.credentials.oneOf.1.properties.api_key.title=API key token +datasources.section.source-square.credentials.oneOf.1.title=API Key +datasources.section.source-square.credentials.title=Credential Type +datasources.section.source-square.include_deleted_objects.title=Include Deleted Objects +datasources.section.source-square.is_sandbox.title=Sandbox +datasources.section.source-square.start_date.title=Start Date +datasources.section.source-square.credentials.oneOf.0.properties.client_id.description=The Square-issued ID of your application +datasources.section.source-square.credentials.oneOf.0.properties.client_secret.description=The Square-issued application secret for your application +datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.description=A refresh token generated using the above client ID and secret +datasources.section.source-square.credentials.oneOf.1.properties.api_key.description=The API key for a Square application +datasources.section.source-square.include_deleted_objects.description=In some streams there is an option to include deleted objects (Items, Categories, Discounts, Taxes) +datasources.section.source-square.is_sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-square.start_date.description=UTC date in the format YYYY-MM-DD. Any data before this date will not be replicated. If not set, all data will be replicated. +datasources.section.source-strava.athlete_id.title=Athlete ID +datasources.section.source-strava.client_id.title=Client ID +datasources.section.source-strava.client_secret.title=Client Secret +datasources.section.source-strava.refresh_token.title=Refresh Token +datasources.section.source-strava.start_date.title=Start Date +datasources.section.source-strava.athlete_id.description=The Athlete ID of your Strava developer application. +datasources.section.source-strava.client_id.description=The Client ID of your Strava developer application. +datasources.section.source-strava.client_secret.description=The Client Secret of your Strava developer application. +datasources.section.source-strava.refresh_token.description=The Refresh Token with the activity: read_all permissions. +datasources.section.source-strava.start_date.description=UTC date and time. Any data before this date will not be replicated. +datasources.section.source-surveymonkey.access_token.title=Access Token +datasources.section.source-surveymonkey.start_date.title=Start Date +datasources.section.source-surveymonkey.survey_ids.title=Survey Monkey survey IDs +datasources.section.source-surveymonkey.access_token.description=Access Token for making authenticated requests. See the docs for information on how to generate this key. +datasources.section.source-surveymonkey.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-surveymonkey.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all boards to which you have access will be replicated. 
+datasources.section.source-talkdesk-explore.api_key.title=API KEY +datasources.section.source-talkdesk-explore.auth_url.title=AUTH URL +datasources.section.source-talkdesk-explore.start_date.title=START DATE +datasources.section.source-talkdesk-explore.timezone.title=TIMEZONE +datasources.section.source-talkdesk-explore.api_key.description=Talkdesk API key. +datasources.section.source-talkdesk-explore.auth_url.description=Talkdesk Auth URL. Only 'client_credentials' auth type supported at the moment. +datasources.section.source-talkdesk-explore.start_date.description=The date from which you'd like to replicate data for Talkdesk Explore API, in the format YYYY-MM-DDT00:00:00. All data generated after this date will be replicated. +datasources.section.source-talkdesk-explore.timezone.description=Timezone to use when generating reports. Only IANA timezones are supported (https://nodatime.org/TimeZones) +datasources.section.source-tempo.api_token.title=API token +datasources.section.source-tempo.api_token.description=Tempo API Token. Go to Tempo>Settings, scroll down to Data Access and select API integration. +datasources.section.source-tidb.database.title=Database +datasources.section.source-tidb.host.title=Host +datasources.section.source-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.source-tidb.password.title=Password +datasources.section.source-tidb.port.title=Port +datasources.section.source-tidb.ssl.title=SSL Connection +datasources.section.source-tidb.username.title=Username +datasources.section.source-tidb.database.description=Name of the database. +datasources.section.source-tidb.host.description=Hostname of the database. +datasources.section.source-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3) +datasources.section.source-tidb.password.description=Password associated with the username. +datasources.section.source-tidb.port.description=Port of the database. +datasources.section.source-tidb.ssl.description=Encrypt data using SSL. +datasources.section.source-tidb.username.description=Username to use to access the database. +datasources.section.source-timely.account_id.title=account_id +datasources.section.source-timely.bearer_token.title=Bearer token +datasources.section.source-timely.start_date.title=startDate +datasources.section.source-timely.account_id.description=Timely account id +datasources.section.source-timely.bearer_token.description=Timely bearer token +datasources.section.source-timely.start_date.description=start date +datasources.section.source-tplcentral.client_id.title=Client ID +datasources.section.source-tplcentral.client_secret.title=Client secret +datasources.section.source-tplcentral.customer_id.title=Customer ID +datasources.section.source-tplcentral.facility_id.title=Facility ID +datasources.section.source-tplcentral.start_date.title=Start date +datasources.section.source-tplcentral.tpl_key.title=3PL GUID +datasources.section.source-tplcentral.url_base.title=URL base +datasources.section.source-tplcentral.user_login.title=User login name +datasources.section.source-tplcentral.user_login_id.title=User login ID +datasources.section.source-tplcentral.start_date.description=Date and time together in RFC 3339 format, for example, 2018-11-13T20:20:39+00:00. 
+datasources.section.source-tplcentral.user_login.description=User login ID and/or name is required +datasources.section.source-tplcentral.user_login_id.description=User login ID and/or name is required +datasources.section.source-trello.board_ids.title=Trello Board IDs +datasources.section.source-trello.key.title=API key +datasources.section.source-trello.start_date.title=Start Date +datasources.section.source-trello.token.title=API token +datasources.section.source-trello.board_ids.description=IDs of the boards to replicate data from. If left empty, data from all boards to which you have access will be replicated. +datasources.section.source-trello.key.description=Trello API key. See the docs for instructions on how to generate it. +datasources.section.source-trello.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-trello.token.description=Trello API token. See the docs for instructions on how to generate it. +datasources.section.source-twilio.account_sid.title=Account ID +datasources.section.source-twilio.auth_token.title=Auth Token +datasources.section.source-twilio.lookback_window.title=Lookback window +datasources.section.source-twilio.start_date.title=Replication Start Date +datasources.section.source-twilio.account_sid.description=Twilio account SID +datasources.section.source-twilio.auth_token.description=Twilio Auth Token. +datasources.section.source-twilio.lookback_window.description=How far into the past to look for records (in minutes). +datasources.section.source-twilio.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-typeform.form_ids.title=Form IDs to replicate +datasources.section.source-typeform.start_date.title=Start Date +datasources.section.source-typeform.token.title=API Token +datasources.section.source-typeform.form_ids.description=When this parameter is set, the connector will replicate data only from the input forms. Otherwise, all forms in your Typeform account will be replicated. You can find form IDs in your form URLs. For example, in the URL "https://mysite.typeform.com/to/u6nXL7" the form_id is u6nXL7. You can find form URLs on the Share panel +datasources.section.source-typeform.start_date.description=UTC date and time in the format: YYYY-MM-DDTHH:mm:ss[Z]. Any data before this date will not be replicated. +datasources.section.source-typeform.token.description=The API Token for a Typeform account. +datasources.section.source-us-census.api_key.description=Your API Key. Get your key here. +datasources.section.source-us-census.query_params.description=The query parameters portion of the GET request, without the api key +datasources.section.source-us-census.query_path.description=The path portion of the GET request +datasources.section.source-woocommerce.conversion_window_days.title=Conversion Window (Optional) +datasources.section.source-woocommerce.api_key.description=The CUSTOMER KEY for API in WooCommerce shop. +datasources.section.source-woocommerce.api_secret.description=The CUSTOMER SECRET for API in WooCommerce shop. +datasources.section.source-woocommerce.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. +datasources.section.source-woocommerce.shop.description=The name of the store. 
For https://EXAMPLE.com, the shop name is 'EXAMPLE.com'. +datasources.section.source-woocommerce.start_date.description=The date you would like to replicate data from. Format: YYYY-MM-DD. +datasources.section.source-yahoo-finance-price.interval.title=Interval +datasources.section.source-yahoo-finance-price.range.title=Range +datasources.section.source-yahoo-finance-price.interval.description=The interval between prices queried. +datasources.section.source-yahoo-finance-price.range.description=The range of prices to be queried. +datasources.section.source-yahoo-finance-price.tickers.description=Comma-separated identifiers for the stocks to be queried. Whitespaces are allowed. +datasources.section.source-youtube-analytics.credentials.properties.client_id.title=Client ID +datasources.section.source-youtube-analytics.credentials.properties.client_secret.title=Client Secret +datasources.section.source-youtube-analytics.credentials.properties.refresh_token.title=Refresh Token +datasources.section.source-youtube-analytics.credentials.title=Authenticate via OAuth 2.0 +datasources.section.source-youtube-analytics.credentials.properties.client_id.description=The Client ID of your developer application +datasources.section.source-youtube-analytics.credentials.properties.client_secret.description=The client secret of your developer application +datasources.section.source-youtube-analytics.credentials.properties.refresh_token.description=A refresh token generated using the above client ID and secret +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-zendesk-chat.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-zendesk-chat.credentials.oneOf.1.title=Access Token +datasources.section.source-zendesk-chat.credentials.title=Authorization Method +datasources.section.source-zendesk-chat.start_date.title=Start Date +datasources.section.source-zendesk-chat.subdomain.title=Subdomain (Optional) +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain a new Access Token when it expires. +datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests. +datasources.section.source-zendesk-chat.start_date.description=The date from which you'd like to replicate data for Zendesk Chat API, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-zendesk-chat.subdomain.description=Required if you access Zendesk Chat from a Zendesk Support subdomain. 
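+# Illustrative values only (the subdomain below is an assumption): for a Zendesk account reached at
+# https://mycompany.zendesk.com, the source-zendesk-chat subdomain and start_date would look like:
+# subdomain=mycompany
+# start_date=2021-02-01T00:00:00Z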
+datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-sunshine.credentials.title=Authorization Method +datasources.section.source-zendesk-sunshine.start_date.title=Start Date +datasources.section.source-zendesk-sunshine.subdomain.title=Subdomain +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.description=Long-term access Token for making authenticated requests. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.description=API Token. See the docs for information on how to generate this key. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account +datasources.section.source-zendesk-sunshine.start_date.description=The date from which you'd like to replicate data for Zendesk Sunshine API, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-zendesk-sunshine.subdomain.description=The subdomain for your Zendesk Account. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-support.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-support.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-support.credentials.title=Authentication * +datasources.section.source-zendesk-support.start_date.title=Start Date +datasources.section.source-zendesk-support.subdomain.title=Subdomain +datasources.section.source-zendesk-support.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-support.start_date.description=The date from which you'd like to replicate data for Zendesk Support API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. 
+datasources.section.source-zendesk-support.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.title=Email +datasources.section.source-zendesk-talk.credentials.oneOf.0.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.title=OAuth2.0 +datasources.section.source-zendesk-talk.credentials.title=Authentication +datasources.section.source-zendesk-talk.start_date.title=Start Date +datasources.section.source-zendesk-talk.subdomain.title=Subdomain +datasources.section.source-zendesk-talk.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.start_date.description=The date from which you'd like to replicate data for Zendesk Talk API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-zendesk-talk.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zenloop.api_token.description=Zenloop API Token. You can get the API token on the settings page here +datasources.section.source-zenloop.date_from.description=Zenloop date_from. Format: 2021-10-24T03:30:30Z or 2021-10-24. Leave empty if only data from the current date should be synced +datasources.section.source-zenloop.survey_group_id.description=Zenloop Survey Group ID. Can be found by pulling All Survey Groups via SurveyGroups stream. Leave empty to pull answers from all survey groups +datasources.section.source-zenloop.survey_id.description=Zenloop Survey ID. Can be found here. Leave empty to pull answers from all surveys +datasources.section.source-zoho-crm.client_id.title=Client ID +datasources.section.source-zoho-crm.client_secret.title=Client Secret +datasources.section.source-zoho-crm.dc_region.title=Data Center Location +datasources.section.source-zoho-crm.edition.title=Zoho CRM Edition +datasources.section.source-zoho-crm.environment.title=Environment +datasources.section.source-zoho-crm.refresh_token.title=Refresh Token +datasources.section.source-zoho-crm.start_datetime.title=Start Date +datasources.section.source-zoho-crm.client_id.description=OAuth2.0 Client ID +datasources.section.source-zoho-crm.client_secret.description=OAuth2.0 Client Secret +datasources.section.source-zoho-crm.dc_region.description=Please choose the region of your Data Center location. 
More info by this Link +datasources.section.source-zoho-crm.edition.description=Choose your Edition of Zoho CRM to determine API Concurrency Limits +datasources.section.source-zoho-crm.environment.description=Please choose the environment +datasources.section.source-zoho-crm.refresh_token.description=OAuth2.0 Refresh Token +datasources.section.source-zoho-crm.start_datetime.description=ISO 8601, for instance: `YYYY-MM-DD`, `YYYY-MM-DD HH:MM:SS+HH:MM` +datasources.section.source-zoom-singer.jwt.title=JWT Token +datasources.section.source-zoom-singer.jwt.description=Zoom JWT Token. See the docs for more information on how to obtain this key. +datasources.section.source-zuora.client_id.title=Client ID +datasources.section.source-zuora.client_secret.title=Client Secret +datasources.section.source-zuora.data_query.title=Data Query Type +datasources.section.source-zuora.start_date.title=Start Date +datasources.section.source-zuora.tenant_endpoint.title=Tenant Endpoint Location +datasources.section.source-zuora.window_in_days.title=Query Window (in days) +datasources.section.source-zuora.client_id.description=Your OAuth user Client ID +datasources.section.source-zuora.client_secret.description=Your OAuth user Client Secret +datasources.section.source-zuora.data_query.description=Choose between `Live` or `Unlimited` - the optimized, replicated database at 12 hours freshness for high volume extraction Link +datasources.section.source-zuora.start_date.description=Start Date in format: YYYY-MM-DD +datasources.section.source-zuora.tenant_endpoint.description=Please choose the right endpoint where your Tenant is located. More info by this Link +datasources.section.source-zuora.window_in_days.description=The number of days for each data chunk, beginning from start_date. The bigger the value, the faster the fetch. (0.1 is roughly a couple of hours, 1 is a day, 364 is a year.) +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method is supposed to temporary store records in GCS bucket. By this select you can chose if these records should be removed from GCS when migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. 
+datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates to how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint url (if you are working with AWS DynamoDB, just leave it empty). +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=Api Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to be used when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.destination-elasticsearch.endpoint.description=The full url of the Elasticsearch server +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the elasticsearch doc id. Does not support composite primary keys. 
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to login. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or url to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS Key +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to login. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. 
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=GCS Bucket Path string Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token for obtaining new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC formatted url. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
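The replica-set `server_addresses` entry above expects each member as `host`:`port`, with members separated by commas. A small Scala sketch of splitting a value of that shape, purely to illustrate the convention (the helper name, example hosts, and the fallback port are made up):

```scala
object ServerAddressesSketch {
  // Splits the comma-separated "host:port" list described by the
  // destination-mongodb server_addresses entry above. Illustrative only.
  def parse(addresses: String): Seq[(String, Int)] =
    addresses.split(",").toSeq.map(_.trim).filter(_.nonEmpty).map { member =>
      member.split(":", 2) match {
        case Array(host, port) => host -> port.toInt
        case _                 => member -> 27017 // port omitted; default assumed
      }
    }

  def main(args: Array[String]): Unit = {
    // Hypothetical members, e.g. (mongo1,27017), (mongo2,27017), (mongo3,27017)
    println(parse("mongo1:27017, mongo2:27017, mongo3:27017"))
  }
}
```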
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
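Both the Kafka and MQTT `topic_pattern` descriptions above mention '{namespace}' and '{stream}' placeholders. The Scala sketch below shows a plain string substitution of those placeholders as an illustration only; the connectors additionally normalize the resulting topic name, which is not reproduced here.

```scala
object TopicPatternSketch {
  // Expands the '{namespace}' and '{stream}' placeholders mentioned in the
  // topic_pattern descriptions above. Illustrative only.
  def expand(pattern: String, namespace: String, stream: String): String =
    pattern.replace("{namespace}", namespace).replace("{stream}", stream)

  def main(args: Array[String]): Unit = {
    // Hypothetical pattern and values: prints "airbyte.public.users"
    println(expand("airbyte.{namespace}.{stream}", "public", "users"))
  }
}
```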
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
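Several of the destinations above (MSSQL, MySQL, Oracle) describe `jdbc_url_params` as 'key=value' pairs joined by '&', e.g. key1=value1&key2=value2&key3=value3. A minimal Scala sketch parsing that convention, as an illustration rather than any connector's actual code:

```scala
object JdbcUrlParamsSketch {
  // Parses the 'key=value' pairs joined by '&' that the jdbc_url_params
  // descriptions above refer to. Illustrative only.
  def parse(params: String): Map[String, String] =
    params.split("&").filter(_.nonEmpty).map { pair =>
      pair.split("=", 2) match {
        case Array(k, v) => k -> v
        case _           => pair -> "" // value omitted
      }
    }.toMap

  def main(args: Array[String]): Unit = {
    // Prints Map(key1 -> value1, key2 -> value2, key3 -> value3)
    println(parse("key1=value1&key2=value2&key3=value3"))
  }
}
```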
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer. +datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages. +datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions. +datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name which can be accessed with. +datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar. +datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms). +datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces. +datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters. +datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk. +datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-rabbitmq.exchange.description=The exchange name. +datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name. +datasources.section.destination-rabbitmq.password.description=The password to connect. +datasources.section.destination-rabbitmq.port.description=The RabbitMQ port. +datasources.section.destination-rabbitmq.routing_key.description=The routing key. +datasources.section.destination-rabbitmq.ssl.description=SSL enabled. +datasources.section.destination-rabbitmq.username.description=The username to connect. +datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name. +datasources.section.destination-redis.cache_type.title=Cache type +datasources.section.destination-redis.host.title=Host +datasources.section.destination-redis.password.title=Password +datasources.section.destination-redis.port.title=Port +datasources.section.destination-redis.username.title=Username +datasources.section.destination-redis.cache_type.description=Redis cache type to store data in. +datasources.section.destination-redis.host.description=Redis host to connect to. +datasources.section.destination-redis.password.description=Password associated with Redis. +datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis. +datasources.section.destination-redshift.database.title=Database +datasources.section.destination-redshift.host.title=Host +datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-redshift.password.title=Password +datasources.section.destination-redshift.port.title=Port +datasources.section.destination-redshift.schema.title=Default Schema +datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key +datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging +datasources.section.destination-redshift.uploading_method.title=Uploading Method +datasources.section.destination-redshift.username.title=Username +datasources.section.destination-redshift.database.description=Name of the database. +datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com) +datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-redshift.password.description=Password associated with the username. +datasources.section.destination-redshift.port.description=Port of the database. +datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-redshift.uploading_method.description=The method how the data will be uploaded to the database. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key. 
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-rockset.api_key.title=Api Key +datasources.section.destination-rockset.api_server.title=Api Server +datasources.section.destination-rockset.workspace.title=Workspace +datasources.section.destination-rockset.api_key.description=Rockset api key +datasources.section.destination-rockset.api_server.description=Rockset api URL +datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created + written to. 
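The Redshift S3-staging option above accepts an AES-CBC envelope-encryption key that must be base64-encoded and 128, 192, or 256 bits (the same field appears again for Snowflake's S3 staging below). One way to generate such a key on the JVM is sketched below in Scala; it is not tied to any Airbyte tooling.

```scala
import java.util.Base64
import javax.crypto.KeyGenerator

object StagingKeySketch {
  // Generates a base64-encoded AES key of the size the key_encrypting_key
  // field above asks for. Illustrative only; not part of any Airbyte tooling.
  def main(args: Array[String]): Unit = {
    val gen = KeyGenerator.getInstance("AES")
    gen.init(256) // 128 and 192 are also accepted per the description above
    val encoded = Base64.getEncoder.encodeToString(gen.generateKey().getEncoded)
    println(encoded)
  }
}
```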
+datasources.section.destination-s3.access_key_id.title=S3 Key ID * +datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec * +datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage 
+datasources.section.destination-s3.format.title=Output Format * +datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional) +datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional) +datasources.section.destination-s3.secret_access_key.title=S3 Access Key * +datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here. +datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-s3.format.description=Format of the data output. See here for more details +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB. +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups.
This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here. +datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here +datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes. +datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here +datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here +datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here +datasources.section.destination-scylla.address.title=Address +datasources.section.destination-scylla.keyspace.title=Keyspace +datasources.section.destination-scylla.password.title=Password +datasources.section.destination-scylla.port.title=Port +datasources.section.destination-scylla.replication.title=Replication factor +datasources.section.destination-scylla.username.title=Username +datasources.section.destination-scylla.address.description=Address to connect to. +datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in. +datasources.section.destination-scylla.password.description=Password associated with Scylla. +datasources.section.destination-scylla.port.description=Port of Scylla. +datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-scylla.username.description=Username to use to access Scylla. +datasources.section.destination-sftp-json.destination_path.title=Destination path +datasources.section.destination-sftp-json.host.title=Host +datasources.section.destination-sftp-json.password.title=Password +datasources.section.destination-sftp-json.port.title=Port +datasources.section.destination-sftp-json.username.title=User +datasources.section.destination-sftp-json.destination_path.description=Path to the directory where json files will be written. +datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server. +datasources.section.destination-sftp-json.password.description=Password associated with the username. +datasources.section.destination-sftp-json.port.description=Port of the SFTP server. +datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server.
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional) +datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password +datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password +datasources.section.destination-snowflake.credentials.title=Authorization Method +datasources.section.destination-snowflake.database.title=Database +datasources.section.destination-snowflake.host.title=Host +datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging +datasources.section.destination-snowflake.loading_method.title=Data Staging Method +datasources.section.destination-snowflake.role.title=Role +datasources.section.destination-snowflake.schema.title=Default Schema +datasources.section.destination-snowflake.username.title=Username +datasources.section.destination-snowflake.warehouse.title=Warehouse +datasources.section.destination-snowflake.credentials.description= +datasources.section.destination-snowflake.credentials.description= +datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key. +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username. +datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into +datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format ...snowflakecomputing.com) +datasources.section.destination-snowflake.jdbc_url_params.description=Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &).
Example: key1=value1&key2=value2&key3=value3 +datasources.section.destination-snowflake.loading_method.description=Select a data staging method +datasources.section.destination-snowflake.loading_method.oneOf.0.description=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.description=Enter your AWS access key ID. Airbyte requires Read and Write permissions on your S3 bucket +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.description=Choose a data encryption method for the staging data +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.description=Toggle to delete staging files from the S3 bucket after a successful sync +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.description=Enter your S3 bucket name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.description=Enter the region where your S3 bucket resides +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.description=Enter your AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.3.description=Recommended for large production workloads for better speed and scalability. 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.description=Enter the Cloud Storage bucket name +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.description=Enter your Google Cloud service account key in the JSON format with read/write access to your Cloud Storage staging bucket +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.description=Enter the Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.4.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.description=Enter your Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.description=Enter your Azure Blob Storage container name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.description=Enter the Azure Blob Storage endpoint domain name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.description=Enter the Shared access signature (SAS) token to grant Snowflake limited access to objects in your Azure Blob Storage account +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.role.description=Enter the role that you want to use to access Snowflake +datasources.section.destination-snowflake.schema.description=Enter the name of the default schema +datasources.section.destination-snowflake.username.description=Enter the name of the user you want to use to access the database +datasources.section.destination-snowflake.warehouse.description=Enter the name of the warehouse that you want to sync data into +datasources.section.destination-sqlite.destination_path.description=Path to the sqlite.db file. The file will be placed inside that local mount. For more information check out our docs +datasources.section.destination-tidb.database.title=Database +datasources.section.destination-tidb.host.title=Host +datasources.section.destination-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-tidb.password.title=Password +datasources.section.destination-tidb.port.title=Port +datasources.section.destination-tidb.ssl.title=SSL Connection +datasources.section.destination-tidb.username.title=User +datasources.section.destination-tidb.database.description=Name of the database. +datasources.section.destination-tidb.host.description=Hostname of the database. +datasources.section.destination-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-tidb.password.description=Password associated with the username. +datasources.section.destination-tidb.port.description=Port of the database. 
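
The jdbc_url_params descriptions above (destination-snowflake and destination-tidb) all use the same key=value pairs joined with '&'. A minimal bash sketch of how such a string typically gets appended to a JDBC URL; the base URL and values below are placeholders, not taken from any connector:

```bash
# Illustrative only: append user-supplied JDBC URL params to a base JDBC URL.
JDBC_BASE="jdbc:mysql://tidb.example.com:4000/mydb"        # placeholder base URL
JDBC_URL_PARAMS="key1=value1&key2=value2&key3=value3"      # the format described above

# Use '?' if the base URL has no query string yet, '&' otherwise.
if [[ "$JDBC_BASE" == *\?* ]]; then SEP="&"; else SEP="?"; fi
echo "${JDBC_BASE}${SEP}${JDBC_URL_PARAMS}"
# -> jdbc:mysql://tidb.example.com:4000/mydb?key1=value1&key2=value2&key3=value3
```
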
+datasources.section.destination-tidb.ssl.description=Encrypt data using SSL. +datasources.section.destination-tidb.username.description=Username to use to access the database. +datasources.section.source-airtable.api_key.title=API Key +datasources.section.source-airtable.base_id.title=Base ID +datasources.section.source-airtable.tables.title=Tables +datasources.section.source-airtable.api_key.description=The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key. +datasources.section.source-airtable.base_id.description=The Base ID to integrate the data from. You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs. +datasources.section.source-airtable.tables.description=The list of Tables to integrate. +datasources.section.source-amazon-ads.auth_type.title=Auth Type +datasources.section.source-amazon-ads.client_id.title=Client ID +datasources.section.source-amazon-ads.client_secret.title=Client Secret +datasources.section.source-amazon-ads.profiles.title=Profile IDs (Optional) +datasources.section.source-amazon-ads.refresh_token.title=Refresh Token +datasources.section.source-amazon-ads.region.title=Region * +datasources.section.source-amazon-ads.report_generation_max_retries.title=Report Generation Maximum Retries * +datasources.section.source-amazon-ads.report_wait_timeout.title=Report Wait Timeout * +datasources.section.source-amazon-ads.start_date.title=Start Date (Optional) +datasources.section.source-amazon-ads.client_id.description=The client ID of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.client_secret.description=The client secret of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.profiles.description=Profile IDs you want to fetch data for. See docs for more details. +datasources.section.source-amazon-ads.refresh_token.description=Amazon Ads refresh token. See the docs for more information on how to obtain this token. +datasources.section.source-amazon-ads.region.description=Region to pull data from (EU/NA/FE). See docs for more details. +datasources.section.source-amazon-ads.report_generation_max_retries.description=Maximum retries Airbyte will attempt for fetching report data. Default is 5. +datasources.section.source-amazon-ads.report_wait_timeout.description=Timeout duration in minutes for Reports. Default is 30 minutes. +datasources.section.source-amazon-ads.start_date.description=The Start date for collecting reports, should not be more than 60 days in the past. 
In YYYY-MM-DD format +datasources.section.source-amazon-seller-partner.app_id.title=App Id * +datasources.section.source-amazon-seller-partner.auth_type.title=Auth Type +datasources.section.source-amazon-seller-partner.aws_access_key.title=AWS Access Key +datasources.section.source-amazon-seller-partner.aws_environment.title=AWS Environment +datasources.section.source-amazon-seller-partner.aws_secret_key.title=AWS Secret Access Key +datasources.section.source-amazon-seller-partner.lwa_app_id.title=LWA Client Id +datasources.section.source-amazon-seller-partner.lwa_client_secret.title=LWA Client Secret +datasources.section.source-amazon-seller-partner.max_wait_seconds.title=Max wait time for reports (in seconds) +datasources.section.source-amazon-seller-partner.period_in_days.title=Period In Days +datasources.section.source-amazon-seller-partner.refresh_token.title=Refresh Token +datasources.section.source-amazon-seller-partner.region.title=AWS Region +datasources.section.source-amazon-seller-partner.replication_end_date.title=End Date +datasources.section.source-amazon-seller-partner.replication_start_date.title=Start Date +datasources.section.source-amazon-seller-partner.report_options.title=Report Options +datasources.section.source-amazon-seller-partner.role_arn.title=Role ARN +datasources.section.source-amazon-seller-partner.app_id.description=Your Amazon App ID +datasources.section.source-amazon-seller-partner.aws_access_key.description=Specifies the AWS access key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.aws_environment.description=An enumeration. +datasources.section.source-amazon-seller-partner.aws_secret_key.description=Specifies the AWS secret key used as part of the credentials to authenticate the user. +datasources.section.source-amazon-seller-partner.lwa_app_id.description=Your Login with Amazon Client ID. +datasources.section.source-amazon-seller-partner.lwa_client_secret.description=Your Login with Amazon Client Secret. +datasources.section.source-amazon-seller-partner.max_wait_seconds.description=Sometimes a report can take up to 30 minutes to generate. This sets the limit for how long to wait for a successful report. +datasources.section.source-amazon-seller-partner.period_in_days.description=Will be used for stream slicing for initial full_refresh sync when no updated state is present for reports that support sliced incremental sync. +datasources.section.source-amazon-seller-partner.refresh_token.description=The Refresh Token obtained via OAuth flow authorization. +datasources.section.source-amazon-seller-partner.region.description=An enumeration. +datasources.section.source-amazon-seller-partner.replication_end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. +datasources.section.source-amazon-seller-partner.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-amazon-seller-partner.report_options.description=Additional information passed to reports. This varies by report type. Must be a valid JSON string. +datasources.section.source-amazon-seller-partner.role_arn.description=Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations requested using this profile. (Needs permission to 'Assume Role' STS). 
+datasources.section.source-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.source-amazon-sqs.attributes_to_return.title=Message Attributes To Return +datasources.section.source-amazon-sqs.delete_messages.title=Delete Messages After Read +datasources.section.source-amazon-sqs.max_batch_size.title=Max Batch Size +datasources.section.source-amazon-sqs.max_wait_time.title=Max Wait Time +datasources.section.source-amazon-sqs.queue_url.title=Queue URL +datasources.section.source-amazon-sqs.region.title=AWS Region +datasources.section.source-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.source-amazon-sqs.visibility_timeout.title=Message Visibility Timeout +datasources.section.source-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.attributes_to_return.description=Comma-separated list of Message Attribute names to return +datasources.section.source-amazon-sqs.delete_messages.description=If Enabled, messages will be deleted from the SQS Queue after being read. If Disabled, messages are left in the queue and can be read more than once. WARNING: Enabling this option can result in data loss in cases of failure, use with caution, see documentation for more detail. +datasources.section.source-amazon-sqs.max_batch_size.description=Max amount of messages to get in one batch (10 max) +datasources.section.source-amazon-sqs.max_wait_time.description=Max amount of time in seconds to wait for messages in a single poll (20 max) +datasources.section.source-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.source-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.source-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for pulling messages +datasources.section.source-amazon-sqs.visibility_timeout.description=Modify the Visibility Timeout of the individual message from the Queue's default (seconds). +datasources.section.source-amplitude.api_key.title=API Key +datasources.section.source-amplitude.secret_key.title=Secret Key +datasources.section.source-amplitude.start_date.title=Replication Start Date +datasources.section.source-amplitude.api_key.description=Amplitude API Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.secret_key.description=Amplitude Secret Key. See the setup guide for more information on how to obtain this key. +datasources.section.source-amplitude.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-apify-dataset.clean.title=Clean +datasources.section.source-apify-dataset.datasetId.title=Dataset ID +datasources.section.source-apify-dataset.clean.description=If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false. +datasources.section.source-apify-dataset.datasetId.description=ID of the dataset you would like to load to Airbyte. +datasources.section.source-appsflyer.api_token.description=Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard. +datasources.section.source-appsflyer.app_id.description=App identifier as found in AppsFlyer. 
+datasources.section.source-appsflyer.start_date.description=The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days. +datasources.section.source-appsflyer.timezone.description=Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console. +datasources.section.source-appstore-singer.issuer_id.title=Issuer ID +datasources.section.source-appstore-singer.key_id.title=Key ID +datasources.section.source-appstore-singer.private_key.title=Private Key +datasources.section.source-appstore-singer.start_date.title=Start Date +datasources.section.source-appstore-singer.vendor.title=Vendor ID +datasources.section.source-appstore-singer.issuer_id.description=Appstore Issuer ID. See the docs for more information on how to obtain this ID. +datasources.section.source-appstore-singer.key_id.description=Appstore Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.private_key.description=Appstore Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-appstore-singer.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-appstore-singer.vendor.description=Appstore Vendor ID. See the docs for more information on how to obtain this ID. +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.title=Personal Access Token +datasources.section.source-asana.credentials.oneOf.0.title=Authenticate with Personal Access Token +datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.title=Credentials title +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title= +datasources.section.source-asana.credentials.oneOf.1.title=Authenticate via Asana (OAuth) +datasources.section.source-asana.credentials.title=Authentication mechanism +datasources.section.source-asana.credentials.description=Choose how to authenticate to Asana +datasources.section.source-asana.credentials.oneOf.0.properties.option_title.description=PAT Credentials +datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.description=Asana Personal Access Token (generate yours here). 
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description= +datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description= +datasources.section.source-asana.credentials.oneOf.1.properties.option_title.description=OAuth Credentials +datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description= +datasources.section.source-aws-cloudtrail.aws_key_id.title=Key ID +datasources.section.source-aws-cloudtrail.aws_region_name.title=Region Name +datasources.section.source-aws-cloudtrail.aws_secret_key.title=Secret Key +datasources.section.source-aws-cloudtrail.start_date.title=Start Date +datasources.section.source-aws-cloudtrail.aws_key_id.description=AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.aws_region_name.description=The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name. +datasources.section.source-aws-cloudtrail.aws_secret_key.description=AWS CloudTrail Secret Key. See the docs for more information on how to obtain this key. +datasources.section.source-aws-cloudtrail.start_date.description=The date you would like to replicate data. Data in AWS CloudTrail is available for the last 90 days only. Format: YYYY-MM-DD. +datasources.section.source-azure-table.storage_access_key.title=Access Key +datasources.section.source-azure-table.storage_account_name.title=Account Name +datasources.section.source-azure-table.storage_endpoint_suffix.title=Endpoint Suffix +datasources.section.source-azure-table.storage_access_key.description=Azure Table Storage Access Key. See the docs for more information on how to obtain this key. +datasources.section.source-azure-table.storage_account_name.description=The name of your storage account. +datasources.section.source-azure-table.storage_endpoint_suffix.description=Azure Table Storage service account URL suffix. See the docs for more information on how to obtain the endpoint suffix +datasources.section.source-bamboo-hr.api_key.description=API key of BambooHR +datasources.section.source-bamboo-hr.custom_reports_fields.description=Comma-separated list of fields to include in custom reports. +datasources.section.source-bamboo-hr.custom_reports_include_default_fields.description=If true, the custom reports endpoint will include the default fields defined here: https://documentation.bamboohr.com/docs/list-of-field-names. +datasources.section.source-bamboo-hr.subdomain.description=Subdomain of BambooHR +datasources.section.source-bigcommerce.access_token.title=Access Token +datasources.section.source-bigcommerce.start_date.title=Start Date +datasources.section.source-bigcommerce.store_hash.title=Store Hash +datasources.section.source-bigcommerce.access_token.description=Access Token for making authenticated requests. +datasources.section.source-bigcommerce.start_date.description=The date you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-bigcommerce.store_hash.description=The hash code of the store. For https://api.bigcommerce.com/stores/HASH_CODE/v3/, the store's hash code is 'HASH_CODE'. 
+datasources.section.source-bigquery.credentials_json.title=Credentials JSON +datasources.section.source-bigquery.dataset_id.title=Default Dataset ID +datasources.section.source-bigquery.project_id.title=Project ID +datasources.section.source-bigquery.credentials_json.description=The contents of your Service Account Key JSON file. See the docs for more information on how to obtain this key. +datasources.section.source-bigquery.dataset_id.description=The dataset ID to search for tables and views. If you are only loading data from one dataset, setting this option could result in much faster schema discovery. +datasources.section.source-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. +datasources.section.source-bing-ads.client_id.title=Client ID +datasources.section.source-bing-ads.client_secret.title=Client Secret +datasources.section.source-bing-ads.developer_token.title=Developer Token +datasources.section.source-bing-ads.refresh_token.title=Refresh Token +datasources.section.source-bing-ads.reports_start_date.title=Reports replication start date +datasources.section.source-bing-ads.tenant_id.title=Tenant ID +datasources.section.source-bing-ads.client_id.description=The Client ID of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.client_secret.description=The Client Secret of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.developer_token.description=Developer token associated with user. See more info in the docs. +datasources.section.source-bing-ads.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-bing-ads.reports_start_date.description=The start date from which to begin replicating report data. Any data generated before this date will not be replicated in reports. This is a UTC date in YYYY-MM-DD format. +datasources.section.source-bing-ads.tenant_id.description=The Tenant ID of your Microsoft Advertising developer application. Set this to "common" unless you know you need a different value. +datasources.section.source-braintree.environment.title=Environment +datasources.section.source-braintree.merchant_id.title=Merchant ID +datasources.section.source-braintree.private_key.title=Private Key +datasources.section.source-braintree.public_key.title=Public Key +datasources.section.source-braintree.start_date.title=Start Date +datasources.section.source-braintree.environment.description=Environment specifies where the data will come from. +datasources.section.source-braintree.merchant_id.description=The unique identifier for your entire gateway account. See the docs for more information on how to obtain this ID. +datasources.section.source-braintree.private_key.description=Braintree Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.public_key.description=Braintree Public Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-cart.access_token.title=Access Token +datasources.section.source-cart.start_date.title=Start Date +datasources.section.source-cart.store_name.title=Store Name +datasources.section.source-cart.access_token.description=Access Token for making authenticated requests. 
+datasources.section.source-cart.start_date.description=The date from which you'd like to replicate the data +datasources.section.source-cart.store_name.description=The name of Cart.com Online Store. All API URLs start with https://[mystorename.com]/api/v1/, where [mystorename.com] is the domain name of your store. +datasources.section.source-chargebee.product_catalog.title=Product Catalog +datasources.section.source-chargebee.site.title=Site +datasources.section.source-chargebee.site_api_key.title=API Key +datasources.section.source-chargebee.start_date.title=Start Date +datasources.section.source-chargebee.product_catalog.description=Product Catalog version of your Chargebee site. Instructions on how to find your version you may find here under `API Version` section. +datasources.section.source-chargebee.site.description=The site prefix for your Chargebee instance. +datasources.section.source-chargebee.site_api_key.description=Chargebee API Key. See the docs for more information on how to obtain this key. +datasources.section.source-chargebee.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-chargify.api_key.description=Chargify API Key. +datasources.section.source-chargify.domain.description=Chargify domain. Normally this domain follows the following format companyname.chargify.com +datasources.section.source-chartmogul.api_key.description=Chartmogul API key +datasources.section.source-chartmogul.interval.description=Some APIs such as Metrics require intervals to cluster data. +datasources.section.source-chartmogul.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. When feasible, any data before this date will not be replicated. +datasources.section.source-clickhouse.database.title=Database +datasources.section.source-clickhouse.host.title=Host +datasources.section.source-clickhouse.password.title=Password +datasources.section.source-clickhouse.port.title=Port +datasources.section.source-clickhouse.ssl.title=SSL Connection +datasources.section.source-clickhouse.username.title=Username +datasources.section.source-clickhouse.database.description=The name of the database. +datasources.section.source-clickhouse.host.description=The host endpoint of the Clickhouse cluster. +datasources.section.source-clickhouse.password.description=The password associated with this username. +datasources.section.source-clickhouse.port.description=The port of the database. +datasources.section.source-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.source-clickhouse.username.description=The username which is used to access the database. +datasources.section.source-close-com.api_key.description=Close.com API key (usually starts with 'api_'; find yours here). +datasources.section.source-close-com.start_date.description=The start date to sync data. Leave blank for full sync. Format: YYYY-MM-DD. +datasources.section.source-cockroachdb.database.title=DB Name +datasources.section.source-cockroachdb.host.title=Host +datasources.section.source-cockroachdb.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-cockroachdb.password.title=Password +datasources.section.source-cockroachdb.port.title=Port +datasources.section.source-cockroachdb.ssl.title=Connect using SSL +datasources.section.source-cockroachdb.username.title=User +datasources.section.source-cockroachdb.database.description=Name of the database. 
+datasources.section.source-cockroachdb.host.description=Hostname of the database. +datasources.section.source-cockroachdb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-cockroachdb.password.description=Password associated with the username. +datasources.section.source-cockroachdb.port.description=Port of the database. +datasources.section.source-cockroachdb.ssl.description=Encrypt client/server communications for increased security. +datasources.section.source-cockroachdb.username.description=Username to use to access the database. +datasources.section.source-commercetools.client_id.description=Id of API Client. +datasources.section.source-commercetools.client_secret.description=The password of secret of API Client. +datasources.section.source-commercetools.host.description=The cloud provider your shop is hosted. See: https://docs.commercetools.com/api/authorization +datasources.section.source-commercetools.project_key.description=The project key +datasources.section.source-commercetools.region.description=The region of the platform. +datasources.section.source-commercetools.start_date.description=The date you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-confluence.api_token.description=Please follow the Jira confluence for generating an API token: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/ +datasources.section.source-confluence.domain_name.description=Your Confluence domain name +datasources.section.source-confluence.email.description=Your Confluence login email +datasources.section.source-db2.encryption.oneOf.0.title=Unencrypted +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.title=Key Store Password. This field is optional. If you do not fill in this field, the password will be randomly generated. +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.title=SSL PEM file +datasources.section.source-db2.encryption.oneOf.1.title=TLS Encrypted (verify certificate) +datasources.section.source-db2.encryption.title=Encryption +datasources.section.source-db2.jdbc_url_params.title=JDBC URL Params +datasources.section.source-db2.db.description=Name of the database. +datasources.section.source-db2.encryption.description=Encryption method to use when communicating with the database +datasources.section.source-db2.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-db2.encryption.oneOf.1.description=Verify and use the cert provided by the server. +datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.description=Key Store Password +datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations +datasources.section.source-db2.host.description=Host of the Db2. +datasources.section.source-db2.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-db2.password.description=Password associated with the username. 
+datasources.section.source-db2.port.description=Port of the database. +datasources.section.source-db2.username.description=Username to use to access the database. +datasources.section.source-delighted.api_key.title=Delighted API Key +datasources.section.source-delighted.since.title=Since +datasources.section.source-delighted.api_key.description=A Delighted API key. +datasources.section.source-delighted.since.description=The date from which you'd like to replicate the data +datasources.section.source-dixa.api_token.description=Dixa API token +datasources.section.source-dixa.batch_size.description=Number of days to batch into one request. Max 31. +datasources.section.source-dixa.start_date.description=The connector pulls records updated from this date onwards. +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-drift.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.1.title=Access Token +datasources.section.source-drift.credentials.title=Authorization Method +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.description=The Client ID of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.description=Drift Access Token. See the docs for more information on how to generate this key. 
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.source-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.source-elasticsearch.endpoint.title=Server Endpoint +datasources.section.source-elasticsearch.authenticationMethod.description=The type of authentication to be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server +datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server +datasources.section.source-elasticsearch.endpoint.description=The full URL of the Elasticsearch server +datasources.section.source-facebook-marketing.access_token.title=Access Token +datasources.section.source-facebook-marketing.account_id.title=Account ID +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.title=ValidActionBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.title=Action Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.title=ValidBreakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.title=Breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.title=End Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.title=ValidEnums +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.title=Fields +datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.title=Custom Insights Lookback Window +datasources.section.source-facebook-marketing.custom_insights.items.properties.name.title=Name +datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.title=Start Date +datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.title=Time Increment 
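
The three source-elasticsearch authenticationMethod options above (None, API Key/Secret, Username/Password) correspond to standard Elasticsearch HTTP authentication. A hedged sketch of what each looks like as a raw request; the endpoint and credentials are placeholders, and this is generic Elasticsearch usage rather than connector internals:

```bash
ES_ENDPOINT="https://es.example.com:9200"   # placeholder Server Endpoint

# None: no authentication header at all
curl "$ES_ENDPOINT/_cluster/health"

# Username/Password: HTTP basic auth
curl -u "myuser:mypassword" "$ES_ENDPOINT/_cluster/health"

# API Key ID + API Key Secret: Elasticsearch expects "ApiKey base64(id:secret)"
API_KEY_ID="my-key-id"; API_KEY_SECRET="my-key-secret"
curl -H "Authorization: ApiKey $(printf '%s:%s' "$API_KEY_ID" "$API_KEY_SECRET" | base64 | tr -d '\n')" \
  "$ES_ENDPOINT/_cluster/health"
```
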
+datasources.section.source-facebook-marketing.custom_insights.items.title=InsightConfig +datasources.section.source-facebook-marketing.custom_insights.title=Custom Insights +datasources.section.source-facebook-marketing.end_date.title=End Date +datasources.section.source-facebook-marketing.fetch_thumbnail_images.title=Fetch Thumbnail Images +datasources.section.source-facebook-marketing.include_deleted.title=Include Deleted +datasources.section.source-facebook-marketing.insights_lookback_window.title=Insights Lookback Window +datasources.section.source-facebook-marketing.max_batch_size.title=Maximum size of Batched Requests +datasources.section.source-facebook-marketing.page_size.title=Page Size of Requests +datasources.section.source-facebook-marketing.start_date.title=Start Date +datasources.section.source-facebook-marketing.access_token.description=The value of the access token generated. See the docs for more information +datasources.section.source-facebook-marketing.account_id.description=The Facebook Ad account ID to use when pulling data from the Facebook Marketing API. +datasources.section.source-facebook-marketing.custom_insights.description=A list which contains insights entries; each entry must have a name and can contain fields, breakdowns or action_breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.description=Config for custom insights +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.description=A list of chosen action_breakdowns for action_breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.description=A list of chosen breakdowns for breakdowns +datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.description=The date until which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. All data generated between the start date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.description=A list of chosen fields for the fields parameter +datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.description=Generic enumeration. +datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.custom_insights.items.properties.name.description=The name value of the insight +datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.description=The date from which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.description=Time window in days by which to aggregate statistics. The sync will be chunked into N day intervals, where N is the number of days you specified. For example, if you set this value to 7, then all statistics will be reported as 7-day aggregates by starting from the start_date. 
If the start and end dates are October 1st and October 30th, then the connector will output 5 records: 01 - 06, 07 - 13, 14 - 20, 21 - 27, and 28 - 30 (3 days only). +datasources.section.source-facebook-marketing.end_date.description=The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.fetch_thumbnail_images.description=In each Ad Creative, fetch the thumbnail_url and store the result in thumbnail_data_url +datasources.section.source-facebook-marketing.include_deleted.description=Include data from deleted Campaigns, Ads, and AdSets +datasources.section.source-facebook-marketing.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.max_batch_size.description=Maximum batch size used when sending batch requests to Facebook API. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.page_size.description=Page size used when sending requests to Facebook API to specify number of records per page when response has pagination. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.start_date.description=The date from which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-facebook-pages.access_token.title=Page Access Token +datasources.section.source-facebook-pages.page_id.title=Page ID +datasources.section.source-facebook-pages.access_token.description=Facebook Page Access Token +datasources.section.source-facebook-pages.page_id.description=Page ID +datasources.section.source-faker.count.title=Count +datasources.section.source-faker.records_per_slice.title=Records Per Stream Slice +datasources.section.source-faker.records_per_sync.title=Records Per Sync +datasources.section.source-faker.seed.title=Seed +datasources.section.source-faker.count.description=How many users should be generated in total. This setting does not apply to the purchases or products stream. +datasources.section.source-faker.records_per_slice.description=How many fake records will be in each page (stream slice), before a state message is emitted? +datasources.section.source-faker.records_per_sync.description=How many fake records will be returned for each sync, for each stream? By default, it will take 2 syncs to create the requested 1000 records. 
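
The time_increment description above is the only piece of real arithmetic in this block: a 30-day range with a 7-day increment yields four full windows plus one short one, i.e. 5 records. A small sketch of that chunking using GNU date; the dates are placeholders and the exact window boundaries the connector uses may differ slightly from this sketch:

```bash
# Illustrative only: slice [START, END] into N-day windows, truncating the last one.
START="2021-10-01"; END="2021-10-30"; N=7
CUR="$START"
while [[ ! "$CUR" > "$END" ]]; do
  WINDOW_END=$(date -d "$CUR + $((N - 1)) days" +%F)
  [[ "$WINDOW_END" > "$END" ]] && WINDOW_END="$END"   # truncate the final window
  echo "$CUR -> $WINDOW_END"
  CUR=$(date -d "$WINDOW_END + 1 day" +%F)
done
# Prints 5 windows for this range, the last one shorter than 7 days.
```
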
+datasources.section.source-faker.seed.description=Manually control the faker random seed to return the same values on subsequent runs (leave -1 for random) +datasources.section.source-file.dataset_name.title=Dataset Name +datasources.section.source-file.format.title=File Format +datasources.section.source-file.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.4.properties.user.title=User +datasources.section.source-file.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.5.properties.user.title=User +datasources.section.source-file.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.6.properties.user.title=User +datasources.section.source-file.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file.provider.oneOf.7.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.7.title=Local Filesystem (limited) +datasources.section.source-file.provider.title=Storage Provider +datasources.section.source-file.reader_options.title=Reader Options +datasources.section.source-file.url.title=URL +datasources.section.source-file.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details. 
+datasources.section.source-file.provider.oneOf.4.properties.host.description= +datasources.section.source-file.provider.oneOf.4.properties.password.description= +datasources.section.source-file.provider.oneOf.4.properties.port.description= +datasources.section.source-file.provider.oneOf.4.properties.user.description= +datasources.section.source-file.provider.oneOf.5.properties.host.description= +datasources.section.source-file.provider.oneOf.5.properties.password.description= +datasources.section.source-file.provider.oneOf.5.properties.port.description= +datasources.section.source-file.provider.oneOf.5.properties.user.description= +datasources.section.source-file.provider.oneOf.6.properties.host.description= +datasources.section.source-file.provider.oneOf.6.properties.password.description= +datasources.section.source-file.provider.oneOf.6.properties.port.description= +datasources.section.source-file.provider.oneOf.6.properties.user.description= +datasources.section.source-file.provider.oneOf.7.properties.storage.description=WARNING: Note that the local storage URL available for reading must start with the local mount "/local/" at the moment until we implement more advanced docker mounting options. +datasources.section.source-file.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior. +datasources.section.source-file.url.description=The URL path to access the file which should be replicated. 
+datasources.section.source-file-secure.dataset_name.title=Dataset Name +datasources.section.source-file-secure.format.title=File Format +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file-secure.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file-secure.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file-secure.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file-secure.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file-secure.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file-secure.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.5.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file-secure.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.6.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file-secure.provider.title=Storage Provider +datasources.section.source-file-secure.reader_options.title=Reader Options +datasources.section.source-file-secure.url.title=URL +datasources.section.source-file-secure.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file-secure.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file-secure.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details. 
+datasources.section.source-file-secure.provider.oneOf.4.properties.host.description= +datasources.section.source-file-secure.provider.oneOf.4.properties.password.description= +datasources.section.source-file-secure.provider.oneOf.4.properties.port.description= +datasources.section.source-file-secure.provider.oneOf.4.properties.user.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.host.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.password.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.port.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.user.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.host.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.password.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.port.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.user.description= +datasources.section.source-file-secure.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior. +datasources.section.source-file-secure.url.description=The URL path to access the file which should be replicated. +datasources.section.source-firebolt.account.title=Account +datasources.section.source-firebolt.database.title=Database +datasources.section.source-firebolt.engine.title=Engine +datasources.section.source-firebolt.host.title=Host +datasources.section.source-firebolt.password.title=Password +datasources.section.source-firebolt.username.title=Username +datasources.section.source-firebolt.account.description=Firebolt account to log in to. +datasources.section.source-firebolt.database.description=The database to connect to. +datasources.section.source-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.source-firebolt.host.description=The host name of your Firebolt database. +datasources.section.source-firebolt.password.description=Firebolt password. +datasources.section.source-firebolt.username.description=Firebolt email address you use to log in. 
+datasources.section.source-flexport.api_key.title=API Key +datasources.section.source-flexport.start_date.title=Start Date +datasources.section.source-freshcaller.api_key.title=API Key +datasources.section.source-freshcaller.domain.title=Domain for Freshcaller account +datasources.section.source-freshcaller.requests_per_minute.title=Requests per minute +datasources.section.source-freshcaller.start_date.title=Start Date +datasources.section.source-freshcaller.sync_lag_minutes.title=Lag in minutes for each sync +datasources.section.source-freshcaller.api_key.description=Freshcaller API Key. See the docs for more information on how to obtain this key. +datasources.section.source-freshcaller.domain.description=Used to construct Base URL for the Freshcaller APIs +datasources.section.source-freshcaller.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account. +datasources.section.source-freshcaller.start_date.description=UTC date and time. Any data created after this date will be replicated. +datasources.section.source-freshcaller.sync_lag_minutes.description=Lag in minutes for each sync, i.e., at time T, data for the time range [prev_sync_time, T-30] will be fetched +datasources.section.source-freshdesk.api_key.title=API Key +datasources.section.source-freshdesk.domain.title=Domain +datasources.section.source-freshdesk.requests_per_minute.title=Requests per minute +datasources.section.source-freshdesk.start_date.title=Start Date +datasources.section.source-freshdesk.api_key.description=Freshdesk API Key. See the docs for more information on how to obtain this key. +datasources.section.source-freshdesk.domain.description=Freshdesk domain +datasources.section.source-freshdesk.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account. +datasources.section.source-freshdesk.start_date.description=UTC date and time. Any data created after this date will be replicated. If this parameter is not set, all data will be replicated. +datasources.section.source-freshsales.api_key.title=API Key +datasources.section.source-freshsales.domain_name.title=Domain Name +datasources.section.source-freshsales.api_key.description=Freshsales API Key. See here. The key is case sensitive. +datasources.section.source-freshsales.domain_name.description=The name of your Freshsales domain +datasources.section.source-freshservice.api_key.title=API Key +datasources.section.source-freshservice.domain_name.title=Domain Name +datasources.section.source-freshservice.start_date.title=Start Date +datasources.section.source-freshservice.api_key.description=Freshservice API Key. See here. The key is case sensitive. +datasources.section.source-freshservice.domain_name.description=The name of your Freshservice domain +datasources.section.source-freshservice.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-github.branch.title=Branch (Optional) +datasources.section.source-github.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-github.credentials.oneOf.0.title=OAuth +datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.title=Personal Access Tokens +datasources.section.source-github.credentials.oneOf.1.title=Personal Access Token +datasources.section.source-github.credentials.title=Authentication * +datasources.section.source-github.page_size_for_large_streams.title=Page size for large streams (Optional) +datasources.section.source-github.repository.title=GitHub Repositories +datasources.section.source-github.start_date.title=Start date +datasources.section.source-github.branch.description=Space-delimited list of GitHub repository branches to pull commits for, e.g. `airbytehq/airbyte/master`. If no branches are specified for a repository, the default branch will be pulled. +datasources.section.source-github.credentials.description=Choose how to authenticate to GitHub +datasources.section.source-github.credentials.oneOf.0.properties.access_token.description=OAuth access token +datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.description=Log into GitHub and then generate a personal access token. To load balance your API quota consumption across multiple API tokens, input multiple tokens separated with "," +datasources.section.source-github.page_size_for_large_streams.description=The Github connector contains several streams with a large amount of data. The page size of such streams depends on the size of your repository. We recommend that you specify values between 10 and 30. +datasources.section.source-github.repository.description=Space-delimited list of GitHub organizations/repositories, e.g. `airbytehq/airbyte` for a single repository, `airbytehq/*` to get all repositories from an organization, and `airbytehq/airbyte airbytehq/another-repo` for multiple repositories. +datasources.section.source-github.start_date.description=The date from which you'd like to replicate data from GitHub in the format YYYY-MM-DDT00:00:00Z. For the streams which support this configuration, only data generated on or after the start date will be replicated. This field doesn't apply to all streams, see the docs for more info +datasources.section.source-gitlab.api_url.title=API URL +datasources.section.source-gitlab.groups.title=Groups +datasources.section.source-gitlab.private_token.title=Private Token +datasources.section.source-gitlab.projects.title=Projects +datasources.section.source-gitlab.start_date.title=Start Date +datasources.section.source-gitlab.api_url.description=Please enter the base URL of your GitLab instance. +datasources.section.source-gitlab.groups.description=Space-delimited list of groups. e.g. airbyte.io. +datasources.section.source-gitlab.private_token.description=Log into your GitLab account and then generate a personal access token. +datasources.section.source-gitlab.projects.description=Space-delimited list of projects. e.g. airbyte.io/documentation meltano/tap-gitlab. +datasources.section.source-gitlab.start_date.description=The date from which you'd like to replicate data for GitLab API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. 
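+# Illustrative example (not part of the generated strings) of the space-delimited formats described above for
+# source-github; the values are samples taken from the descriptions themselves, not defaults:
+#   repository: airbytehq/airbyte airbytehq/another-repo
+#   branch: airbytehq/airbyte/master
+#   start_date: 2021-01-01T00:00:00Z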
+datasources.section.source-google-ads.conversion_window_days.title=Conversion Window (Optional) +datasources.section.source-google-ads.credentials.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-ads.credentials.properties.client_id.title=Client ID +datasources.section.source-google-ads.credentials.properties.client_secret.title=Client Secret +datasources.section.source-google-ads.credentials.properties.developer_token.title=Developer Token +datasources.section.source-google-ads.credentials.properties.refresh_token.title=Refresh Token +datasources.section.source-google-ads.credentials.title=Google Credentials +datasources.section.source-google-ads.custom_queries.items.properties.query.title=Custom Query +datasources.section.source-google-ads.custom_queries.items.properties.table_name.title=Destination Table Name +datasources.section.source-google-ads.custom_queries.title=Custom GAQL Queries (Optional) +datasources.section.source-google-ads.customer_id.title=Customer ID(s) +datasources.section.source-google-ads.end_date.title=End Date (Optional) +datasources.section.source-google-ads.login_customer_id.title=Login Customer ID for Managed Accounts (Optional) +datasources.section.source-google-ads.start_date.title=Start Date +datasources.section.source-google-ads.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. For more information, see Google's documentation. +datasources.section.source-google-ads.credentials.description= +datasources.section.source-google-ads.credentials.properties.access_token.description=Access Token for making authenticated requests. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.client_id.description=The Client ID of your Google Ads developer application. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.client_secret.description=The Client Secret of your Google Ads developer application. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.developer_token.description=Developer token granted by Google to use their APIs. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.refresh_token.description=The token for obtaining a new access token. More instruction on how to find this value in our docs +datasources.section.source-google-ads.custom_queries.description= +datasources.section.source-google-ads.custom_queries.items.properties.query.description=A custom defined GAQL query for building the report. Should not contain segments.date expression because it is used by incremental streams. See Google's query builder for more information. +datasources.section.source-google-ads.custom_queries.items.properties.table_name.description=The table name in your destination database for the chosen query. +datasources.section.source-google-ads.customer_id.description=Comma-separated list of (client) customer IDs. Each customer ID must be specified as a 10-digit number without dashes. More instruction on how to find this value in our docs. 
Metrics streams like AdGroupAdReport cannot be requested for a manager account. +datasources.section.source-google-ads.end_date.description=UTC date and time in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-google-ads.login_customer_id.description=If your access to the customer account is through a manager account, this field is required and must be set to the customer ID of the manager account (10-digit number without dashes). More information about this field can be found here +datasources.section.source-google-ads.start_date.description=UTC date and time in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.title=Authenticate via Google (Oauth) +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-analytics-data-api.credentials.title=Credentials +datasources.section.source-google-analytics-data-api.custom_reports.title=Custom Reports (Optional) +datasources.section.source-google-analytics-data-api.date_ranges_start_date.title=Date Range Start Date +datasources.section.source-google-analytics-data-api.property_id.title=Property ID +datasources.section.source-google-analytics-data-api.window_in_days.title=Data request time increment in days (Optional) +datasources.section.source-google-analytics-data-api.credentials.description=Credentials for the service +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization +datasources.section.source-google-analytics-data-api.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field. +datasources.section.source-google-analytics-data-api.date_ranges_start_date.description=The start date. One of the values Ndaysago, yesterday, today or in the format YYYY-MM-DD +datasources.section.source-google-analytics-data-api.property_id.description=A Google Analytics GA4 property identifier whose events are tracked. 
Specified in the URL path and not the body +datasources.section.source-google-analytics-data-api.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-analytics-v4.credentials.oneOf.0.title=Authenticate via Google (Oauth) +datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key +datasources.section.source-google-analytics-v4.credentials.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-analytics-v4.credentials.title=Credentials +datasources.section.source-google-analytics-v4.custom_reports.title=Custom Reports (Optional) +datasources.section.source-google-analytics-v4.start_date.title=Replication Start Date +datasources.section.source-google-analytics-v4.view_id.title=View ID +datasources.section.source-google-analytics-v4.window_in_days.title=Data request time increment in days (Optional) +datasources.section.source-google-analytics-v4.credentials.description=Credentials for the service +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization +datasources.section.source-google-analytics-v4.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field. +datasources.section.source-google-analytics-v4.start_date.description=The date in the format YYYY-MM-DD. Any data before this date will not be replicated. +datasources.section.source-google-analytics-v4.view_id.description=The ID for the Google Analytics View you want to fetch data from. This can be found from the Google Analytics Account Explorer. +datasources.section.source-google-analytics-v4.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. 
More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364. +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-directory.credentials.oneOf.0.title=Sign in via Google (OAuth) +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.title=Credentials JSON +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.source-google-directory.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-google-directory.credentials.oneOf.1.title=Service Account Key +datasources.section.source-google-directory.credentials.title=Google Credentials +datasources.section.source-google-directory.credentials.description=Google APIs use the OAuth 2.0 protocol for authentication and authorization. The Source supports Web server application and Service accounts scenarios. +datasources.section.source-google-directory.credentials.oneOf.0.description=For this scenario, the user only needs to give permission to read Google Directory data. +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.description=The Client ID of the developer application. +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the developer application. +datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.description=Authentication Scenario +datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.description=The Token for obtaining a new access token. +datasources.section.source-google-directory.credentials.oneOf.1.description=For this scenario, the user should obtain the service account's credentials from the Google API Console and provide the delegated email. +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key. +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.description=Authentication Scenario +datasources.section.source-google-directory.credentials.oneOf.1.properties.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs. 
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-search-console.authorization.oneOf.0.title=OAuth +datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.title=Admin Email +datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.title=Service Account JSON Key +datasources.section.source-google-search-console.authorization.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-search-console.authorization.title=Authentication Type +datasources.section.source-google-search-console.end_date.title=End Date +datasources.section.source-google-search-console.site_urls.title=Website URL Property +datasources.section.source-google-search-console.start_date.title=Start Date +datasources.section.source-google-search-console.authorization.description= +datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.description=Access token for making authenticated requests. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.description=The client ID of your Google Search Console developer application. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.description=The client secret of your Google Search Console developer application. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs. +datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.description=The JSON key of the service account to use for authorization. Read more here. +datasources.section.source-google-search-console.end_date.description=UTC date in the format 2017-01-25. Any data after this date will not be replicated. Must be greater than or equal to the start date field. +datasources.section.source-google-search-console.site_urls.description=The URLs of the website property attached to your GSC account. Read more here. +datasources.section.source-google-search-console.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-google-workspace-admin-reports.credentials_json.title=Credentials JSON +datasources.section.source-google-workspace-admin-reports.email.title=Email +datasources.section.source-google-workspace-admin-reports.lookback.title=Lookback Window in Days +datasources.section.source-google-workspace-admin-reports.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key. 
+datasources.section.source-google-workspace-admin-reports.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs. +datasources.section.source-google-workspace-admin-reports.lookback.description=Sets the range of time shown in the report. The maximum value allowed by the Google API is 180 days. +datasources.section.source-greenhouse.api_key.title=API Key +datasources.section.source-greenhouse.api_key.description=Greenhouse API Key. See the docs for more information on how to generate this key. +datasources.section.source-harvest.account_id.title=Account ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-harvest.credentials.oneOf.0.title=Authenticate via Harvest (OAuth) +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.title=Personal Access Token +datasources.section.source-harvest.credentials.oneOf.1.title=Authenticate with Personal Access Token +datasources.section.source-harvest.credentials.title=Authentication mechanism +datasources.section.source-harvest.replication_start_date.title=Start Date +datasources.section.source-harvest.account_id.description=Harvest account ID. Required for all Harvest requests in combination with a Personal Access Token +datasources.section.source-harvest.credentials.description=Choose how to authenticate to Harvest. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.description=The Client ID of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.description=Log into Harvest and then create a new personal access token. +datasources.section.source-harvest.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-hellobaton.api_key.description=Authentication key required to access the API endpoints +datasources.section.source-hellobaton.company.description=Company name that generates your base API URL +datasources.section.source-hubplanner.api_key.description=Hubplanner API key. See https://github.com/hubplanner/API#authentication for more details. +datasources.section.source-instagram.access_token.title=Access Token +datasources.section.source-instagram.start_date.title=Start Date +datasources.section.source-instagram.access_token.description=The value of the access token generated. See the docs for more information +datasources.section.source-instagram.start_date.description=The date from which you'd like to replicate data for User Insights, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-intercom.access_token.title=Access token +datasources.section.source-intercom.start_date.title=Start date +datasources.section.source-intercom.access_token.description=Access token for making authenticated requests. See the Intercom docs for more information. 
+datasources.section.source-intercom.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-iterable.api_key.title=API Key +datasources.section.source-iterable.start_date.title=Start Date +datasources.section.source-iterable.api_key.description=Iterable API Key. See the docs for more information on how to obtain this key. +datasources.section.source-iterable.start_date.description=The date from which you'd like to replicate data for Iterable, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-jdbc.jdbc_url.title=JDBC URL +datasources.section.source-jdbc.password.title=Password +datasources.section.source-jdbc.username.title=Username +datasources.section.source-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.source-jdbc.password.description=The password associated with this username. +datasources.section.source-jdbc.username.description=The username which is used to access the database. +datasources.section.source-jira.additional_fields.title=Additional Fields +datasources.section.source-jira.api_token.title=API Token +datasources.section.source-jira.domain.title=Domain +datasources.section.source-jira.email.title=Email +datasources.section.source-jira.enable_experimental_streams.title=Enable Experimental Streams +datasources.section.source-jira.expand_issue_changelog.title=Expand Issue Changelog +datasources.section.source-jira.projects.title=Projects +datasources.section.source-jira.render_fields.title=Render Issue Fields +datasources.section.source-jira.start_date.title=Start Date +datasources.section.source-jira.additional_fields.description=List of additional fields to include in replicating issues. +datasources.section.source-jira.api_token.description=Jira API Token. See the docs for more information on how to generate this key. +datasources.section.source-jira.domain.description=The Domain for your Jira account, e.g. airbyteio.atlassian.net +datasources.section.source-jira.email.description=The user email for your Jira account. +datasources.section.source-jira.enable_experimental_streams.description=Allow the use of experimental streams which rely on undocumented Jira API endpoints. See https://docs.airbyte.io/integrations/sources/jira#experimental-tables for more info. +datasources.section.source-jira.expand_issue_changelog.description=Expand the changelog when replicating issues. +datasources.section.source-jira.projects.description=List of Jira project keys to replicate data for. +datasources.section.source-jira.render_fields.description=Render issue fields in HTML format in addition to Jira JSON-like format. +datasources.section.source-jira.start_date.description=The date from which you'd like to replicate data for Jira in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only in the following incremental streams: issues. 
+datasources.section.source-kafka.auto_commit_interval_ms.title=Auto Commit Interval, ms +datasources.section.source-kafka.auto_offset_reset.title=Auto Offset Reset +datasources.section.source-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.source-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.source-kafka.client_id.title=Client ID +datasources.section.source-kafka.enable_auto_commit.title=Enable Auto Commit +datasources.section.source-kafka.group_id.title=Group ID +datasources.section.source-kafka.max_poll_records.title=Max Poll Records +datasources.section.source-kafka.polling_time.title=Polling Time +datasources.section.source-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.source-kafka.protocol.title=Protocol +datasources.section.source-kafka.receive_buffer_bytes.title=Receive Buffer, bytes +datasources.section.source-kafka.repeated_calls.title=Repeated Calls +datasources.section.source-kafka.request_timeout_ms.title=Request Timeout, ms +datasources.section.source-kafka.retry_backoff_ms.title=Retry Backoff, ms +datasources.section.source-kafka.subscription.oneOf.0.properties.topic_partitions.title=List of topic:partition Pairs +datasources.section.source-kafka.subscription.oneOf.0.title=Manually assign a list of partitions +datasources.section.source-kafka.subscription.oneOf.1.properties.topic_pattern.title=Topic Pattern +datasources.section.source-kafka.subscription.oneOf.1.title=Subscribe to all topics matching specified pattern +datasources.section.source-kafka.subscription.title=Subscription Method +datasources.section.source-kafka.test_topic.title=Test Topic +datasources.section.source-kafka.auto_commit_interval_ms.description=The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true. +datasources.section.source-kafka.auto_offset_reset.description=What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer. +datasources.section.source-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background. +datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups. +datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and return them incrementally from each poll. +datasources.section.source-kafka.polling_time.description=Amount of time the Kafka connector should try to poll for messages. +datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received. +datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios. 
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions. +datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one). +datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read. +datasources.section.source-kafka.test_topic.description=The Topic used to test whether Airbyte can consume messages. +datasources.section.source-klaviyo.api_key.title=API Key +datasources.section.source-klaviyo.start_date.title=Start Date +datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key. +datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-kustomer-singer.api_token.title=API Token +datasources.section.source-kustomer-singer.start_date.title=Start Date +datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this +datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data +datasources.section.source-kyriba.domain.title=Domain +datasources.section.source-kyriba.end_date.title=End Date +datasources.section.source-kyriba.password.title=Password +datasources.section.source-kyriba.start_date.title=Start Date +datasources.section.source-kyriba.username.title=Username +datasources.section.source-kyriba.domain.description=Kyriba domain +datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run to the current date. +datasources.section.source-kyriba.password.description=Password to be used in basic auth +datasources.section.source-kyriba.start_date.description=The date the sync should start from. +datasources.section.source-kyriba.username.description=Username to be used in basic auth +datasources.section.source-lemlist.api_key.title=API key +datasources.section.source-lemlist.api_key.description=Lemlist API key. +datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional) +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token +datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token +datasources.section.source-linkedin-ads.credentials.title=Authentication * +datasources.section.source-linkedin-ads.start_date.title=Start date +datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs, separated by a space, to pull the data from. Leave empty if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info. 
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours. +datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token +datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token +datasources.section.source-linkedin-pages.credentials.title=Authentication * +datasources.section.source-linkedin-pages.org_id.title=Organization ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID +datasources.section.source-linnworks.application_id.title=Application ID +datasources.section.source-linnworks.application_secret.title=Application Secret +datasources.section.source-linnworks.start_date.title=Start Date +datasources.section.source-linnworks.token.title=API Token +datasources.section.source-linnworks.application_id.description=Linnworks Application ID +datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret +datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-looker.client_id.title=Client ID +datasources.section.source-looker.client_secret.title=Client Secret +datasources.section.source-looker.domain.title=Domain +datasources.section.source-looker.run_look_ids.title=Look IDs to Run +datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key. +datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key. 
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com,looker.[clientname].com,IP address +datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional) +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key +datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key +datasources.section.source-mailchimp.credentials.title=Authentication * +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key. +datasources.section.source-mailgun.domain_region.title=Domain Region Code +datasources.section.source-mailgun.private_key.title=Private API Key +datasources.section.source-mailgun.start_date.title=Replication Start Date +datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'. +datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data. +datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago. +datasources.section.source-marketo.client_id.title=Client ID +datasources.section.source-marketo.client_secret.title=Client Secret +datasources.section.source-marketo.domain_url.title=Domain URL +datasources.section.source-marketo.start_date.title=Start Date +datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this. +datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID +datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0) +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID +datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft +datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism +datasources.section.source-microsoft-teams.period.title=Period +datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL +datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180. 
+datasources.section.source-mixpanel.api_secret.title=Project Secret +datasources.section.source-mixpanel.attribution_window.title=Attribution Window +datasources.section.source-mixpanel.date_window_size.title=Date slicing window +datasources.section.source-mixpanel.end_date.title=End Date +datasources.section.source-mixpanel.project_timezone.title=Project Timezone +datasources.section.source-mixpanel.region.title=Region +datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default +datasources.section.source-mixpanel.start_date.title=Start Date +datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this. +datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days. +datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days that is used to slice through data. You can reduce it if the amount of data in each window is too big for your environment. +datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date +datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console. +datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU. +datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise, new properties will be ignored. +datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default. +datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional) +datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-monday.credentials.oneOf.1.title=API Token +datasources.section.source-monday.credentials.title=Authorization Method +datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. 
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests. +datasources.section.source-mongodb.auth_source.title=Authentication source +datasources.section.source-mongodb.database.title=Database name +datasources.section.source-mongodb.host.title=Host +datasources.section.source-mongodb.password.title=Password +datasources.section.source-mongodb.port.title=Port +datasources.section.source-mongodb.replica_set.title=Replica Set +datasources.section.source-mongodb.ssl.title=TLS connection +datasources.section.source-mongodb.user.title=User +datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info. +datasources.section.source-mongodb.database.description=Database to be replicated. +datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated. +datasources.section.source-mongodb.password.description=Password +datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated. +datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info. +datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB. +datasources.section.source-mongodb.user.description=User +datasources.section.source-mongodb-v2.auth_source.title=Authentication Source +datasources.section.source-mongodb-v2.database.title=Database Name +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses +datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type +datasources.section.source-mongodb-v2.password.title=Password +datasources.section.source-mongodb-v2.user.title=User +datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored. +datasources.section.source-mongodb-v2.database.description=The database you want to replicate. +datasources.section.source-mongodb-v2.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database. 
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by comma. +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to. +datasources.section.source-mongodb-v2.password.description=The password associated with this username. +datasources.section.source-mongodb-v2.user.description=The username which is used to access the database. +datasources.section.source-mssql.database.title=Database +datasources.section.source-mssql.host.title=Host +datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mssql.password.title=Password +datasources.section.source-mssql.port.title=Port +datasources.section.source-mssql.replication_method.oneOf.0.title=Standard +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level +datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mssql.replication_method.title=Replication Method +datasources.section.source-mssql.schemas.title=Schemas +datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.source-mssql.ssl_method.title=SSL Method +datasources.section.source-mssql.username.title=Username +datasources.section.source-mssql.database.description=The name of the database. +datasources.section.source-mssql.host.description=The hostname of the database. +datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mssql.password.description=The password associated with the username. +datasources.section.source-mssql.port.description=The port of the database. +datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC. +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database. +datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database. +datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.source-mssql.username.description=The username which is used to access the database. 
+datasources.section.source-my-hours.email.title=Email +datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size +datasources.section.source-my-hours.password.title=Password +datasources.section.source-my-hours.start_date.title=Start Date +datasources.section.source-my-hours.email.description=Your My Hours username +datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days +datasources.section.source-my-hours.password.description=The password associated to the username +datasources.section.source-my-hours.start_date.description=Start date for collecting time logs +datasources.section.source-mysql.database.title=Database +datasources.section.source-mysql.host.title=Host +datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mysql.password.title=Password +datasources.section.source-mysql.port.title=Port +datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD +datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mysql.replication_method.title=Replication Method +datasources.section.source-mysql.ssl.title=SSL Connection +datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred +datasources.section.source-mysql.ssl_mode.oneOf.1.title=required +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity +datasources.section.source-mysql.ssl_mode.title=SSL modes +datasources.section.source-mysql.username.title=Username +datasources.section.source-mysql.database.description=The database name. +datasources.section.source-mysql.host.description=The host name of the database. +datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mysql.password.description=The password associated with the username. +datasources.section.source-mysql.port.description=The port to connect to. +datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database. +datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL. +datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
  • preferred - Automatically attempt SSL connection. If the MySQL server does not support SSL, continue with a regular connection.
  • required - Always connect with SSL. If the MySQL server doesn’t support SSL, the connection will not be established. Certificate Authority (CA) and Hostname are not verified.
  • verify-ca - Always connect with SSL. Verifies CA, but allows connection even if Hostname does not match.
  • Verify Identity - Always connect with SSL. Verify both CA and Hostname.
  • Read more in the docs.
+datasources.section.source-mysql.ssl_mode.oneOf.0.description=Preferred SSL mode.
+datasources.section.source-mysql.ssl_mode.oneOf.1.description=Require SSL mode.
+datasources.section.source-mysql.ssl_mode.oneOf.2.description=Verify CA SSL mode.
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.description=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well)
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well)
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.description=Password for key storage. This field is optional. If you do not add it, the password will be generated automatically.
+datasources.section.source-mysql.ssl_mode.oneOf.3.description=Verify-full SSL mode.
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.description=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well)
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well)
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.description=Password for key storage. This field is optional. If you do not add it, the password will be generated automatically.
+datasources.section.source-mysql.username.description=The username which is used to access the database.
+datasources.section.source-notion.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-notion.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-notion.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-notion.credentials.oneOf.1.properties.token.title=Access Token
+datasources.section.source-notion.credentials.oneOf.1.title=Access Token
+datasources.section.source-notion.credentials.title=Authenticate using
+datasources.section.source-notion.start_date.title=Start Date
+datasources.section.source-notion.credentials.description=Pick an authentication method.
+datasources.section.source-notion.credentials.oneOf.0.properties.access_token.description=The Access Token received by completing the OAuth web flow of Notion.
+datasources.section.source-notion.credentials.oneOf.0.properties.client_id.description=The Client ID of your Notion integration.
+datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Notion integration.
+datasources.section.source-notion.credentials.oneOf.1.properties.token.description=Notion API access token, see the docs for more information on how to obtain this token.
+datasources.section.source-notion.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-okta.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-okta.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-okta.credentials.oneOf.1.title=API Token +datasources.section.source-okta.credentials.title=Authorization Method * +datasources.section.source-okta.domain.title=Okta domain +datasources.section.source-okta.start_date.title=Start Date +datasources.section.source-okta.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain new Access Token, when it's expired. +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.description=An Okta token. See the docs for instructions on how to generate it. +datasources.section.source-okta.domain.description=The Okta domain. See the docs for instructions on how to find it. +datasources.section.source-okta.start_date.description=UTC date and time in the format YYYY-MM-DDTHH:MM:SSZ. Any data before this date will not be replicated. +datasources.section.source-onesignal.outcome_names.title=Outcome Names +datasources.section.source-onesignal.start_date.title=Start Date +datasources.section.source-onesignal.user_auth_key.title=User Auth Key +datasources.section.source-onesignal.outcome_names.description=Comma-separated list of names and the value (sum/count) for the returned outcome data. See the docs for more details +datasources.section.source-onesignal.start_date.description=The date from which you'd like to replicate data for OneSignal API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-onesignal.user_auth_key.description=OneSignal User Auth Key, see the docs for more information on how to obtain this key. +datasources.section.source-openweather.appid.title=App ID +datasources.section.source-openweather.lang.title=Language +datasources.section.source-openweather.lat.title=Latitude +datasources.section.source-openweather.lon.title=Longitude +datasources.section.source-openweather.units.title=Units +datasources.section.source-openweather.appid.description=Your OpenWeather API Key. See here. The key is case sensitive. +datasources.section.source-openweather.lang.description=You can use lang parameter to get the output in your language. The contents of the description field will be translated. See here for the list of supported languages. +datasources.section.source-openweather.lat.description=Latitude for which you want to get weather condition from. (min -90, max 90) +datasources.section.source-openweather.lon.description=Longitude for which you want to get weather condition from. (min -180, max 180) +datasources.section.source-openweather.units.description=Units of measurement. standard, metric and imperial units are available. If you do not use the units parameter, standard units will be applied by default. 
+datasources.section.source-oracle.connection_data.oneOf.0.properties.service_name.title=Service name +datasources.section.source-oracle.connection_data.oneOf.0.title=Service name +datasources.section.source-oracle.connection_data.oneOf.1.properties.sid.title=System ID (SID) +datasources.section.source-oracle.connection_data.oneOf.1.title=System ID (SID) +datasources.section.source-oracle.connection_data.title=Connect by +datasources.section.source-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.source-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM File +datasources.section.source-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.source-oracle.encryption.title=Encryption +datasources.section.source-oracle.host.title=Host +datasources.section.source-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.source-oracle.password.title=Password +datasources.section.source-oracle.port.title=Port +datasources.section.source-oracle.schemas.title=Schemas +datasources.section.source-oracle.username.title=User +datasources.section.source-oracle.connection_data.description=Connect data that will be used for DB connection +datasources.section.source-oracle.connection_data.oneOf.0.description=Use service name +datasources.section.source-oracle.connection_data.oneOf.1.description=Use SID (Oracle System Identifier) +datasources.section.source-oracle.encryption.description=The encryption method with is used when communicating with the database. +datasources.section.source-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines what encryption algorithm is used. +datasources.section.source-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.source-oracle.host.description=Hostname of the database. +datasources.section.source-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-oracle.password.description=The password associated with the username. +datasources.section.source-oracle.port.description=Port of the database. +datasources.section.source-oracle.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-oracle.username.description=The username which is used to access the database. 
+datasources.section.source-orb.api_key.title=Orb API Key
+datasources.section.source-orb.lookback_window_days.title=Lookback Window (in days)
+datasources.section.source-orb.numeric_event_properties_keys.title=Event properties keys (numeric values)
+datasources.section.source-orb.start_date.title=Start Date
+datasources.section.source-orb.string_event_properties_keys.title=Event properties keys (string values)
+datasources.section.source-orb.api_key.description=Orb API Key, issued from the Orb admin console.
+datasources.section.source-orb.lookback_window_days.description=When set to N, the connector will always refresh resources created within the past N days. By default, updated objects that are not newly created are not incrementally synced.
+datasources.section.source-orb.numeric_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction.
+datasources.section.source-orb.start_date.description=UTC date and time in the format 2022-03-01T00:00:00Z. Any data with created_at before this date will not be synced.
+datasources.section.source-orb.string_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction.
+datasources.section.source-outreach.client_id.title=Client ID
+datasources.section.source-outreach.client_secret.title=Client Secret
+datasources.section.source-outreach.redirect_uri.title=Redirect URI
+datasources.section.source-outreach.refresh_token.title=Refresh Token
+datasources.section.source-outreach.start_date.title=Start Date
+datasources.section.source-outreach.client_id.description=The Client ID of your Outreach developer application.
+datasources.section.source-outreach.client_secret.description=The Client Secret of your Outreach developer application.
+datasources.section.source-outreach.redirect_uri.description=A Redirect URI is the location where the authorization server sends the user once the app has been successfully authorized and granted an authorization code or access token.
+datasources.section.source-outreach.refresh_token.description=The token for obtaining the new access token.
+datasources.section.source-outreach.start_date.description=The date from which you'd like to replicate data for Outreach API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.
+datasources.section.source-pardot.client_id.description=The Consumer Key that can be found when viewing your app in Salesforce.
+datasources.section.source-pardot.client_secret.description=The Consumer Secret that can be found when viewing your app in Salesforce.
+datasources.section.source-pardot.is_sandbox.description=Whether or not the app is in a Salesforce sandbox. If you do not know what this is, assume it is false.
+datasources.section.source-pardot.pardot_business_unit_id.description=Pardot Business ID, which can be found at Setup > Pardot > Pardot Account Setup.
+datasources.section.source-pardot.refresh_token.description=Salesforce Refresh Token used for Airbyte to access your Salesforce account. If you don't know what this is, follow this guide to retrieve it.
+datasources.section.source-pardot.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
Leave blank to skip this filter.
+datasources.section.source-paypal-transaction.client_id.title=Client ID
+datasources.section.source-paypal-transaction.client_secret.title=Client secret
+datasources.section.source-paypal-transaction.is_sandbox.title=Sandbox
+datasources.section.source-paypal-transaction.refresh_token.title=Refresh token (Optional)
+datasources.section.source-paypal-transaction.start_date.title=Start Date
+datasources.section.source-paypal-transaction.client_id.description=The Client ID of your Paypal developer application.
+datasources.section.source-paypal-transaction.client_secret.description=The Client Secret of your Paypal developer application.
+datasources.section.source-paypal-transaction.is_sandbox.description=Determines whether to use the sandbox or production environment.
+datasources.section.source-paypal-transaction.refresh_token.description=The key to refresh the expired access token.
+datasources.section.source-paypal-transaction.start_date.description=Start Date for data extraction in ISO format. The date must be within the range from 3 years ago up to 12 hours before the present time.
+datasources.section.source-paystack.lookback_window_days.title=Lookback Window (in days)
+datasources.section.source-paystack.secret_key.title=Secret Key
+datasources.section.source-paystack.start_date.title=Start Date
+datasources.section.source-paystack.lookback_window_days.description=When set, the connector will always reload data from the past N days, where N is the value set here. This is useful if your data is updated after creation.
+datasources.section.source-paystack.secret_key.description=The Paystack API key (usually starts with 'sk_live_'; find yours here).
+datasources.section.source-paystack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-persistiq.api_key.description=PersistIq API Key. See the docs for more information on where to find that key.
+datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-pinterest.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.title=Access Token
+datasources.section.source-pinterest.credentials.oneOf.1.title=Access Token
+datasources.section.source-pinterest.credentials.title=Authorization Method
+datasources.section.source-pinterest.start_date.title=Start Date
+datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain a new Access Token when it's expired.
+datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests.
+datasources.section.source-pinterest.start_date.description=A date in the format YYYY-MM-DD. If you have not set a date, it defaults to 2020-07-28.
+datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-pipedrive.authorization.oneOf.0.title=Sign in via Pipedrive (OAuth)
+datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.title=API Token
+datasources.section.source-pipedrive.authorization.oneOf.1.title=API Key Authentication
+datasources.section.source-pipedrive.authorization.title=Authentication Type
+datasources.section.source-pipedrive.replication_start_date.title=Start Date
+datasources.section.source-pipedrive.authorization.description=Choose one of the possible authorization methods.
+datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.description=The Client ID of your Pipedrive developer application.
+datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.description=The Client Secret of your Pipedrive developer application.
+datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining the new access token.
+datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.description=The Pipedrive API Token.
+datasources.section.source-pipedrive.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. When specified and not None, the stream will behave incrementally.
+datasources.section.source-pivotal-tracker.api_token.description=Pivotal Tracker API token.
+datasources.section.source-plaid.access_token.title=Access Token
+datasources.section.source-plaid.api_key.title=API Key
+datasources.section.source-plaid.client_id.title=Client ID
+datasources.section.source-plaid.plaid_env.title=Plaid Environment
+datasources.section.source-plaid.start_date.title=Start Date
+datasources.section.source-plaid.access_token.description=The end-user's Link access token.
+datasources.section.source-plaid.api_key.description=The Plaid API key to use when calling the API.
+datasources.section.source-plaid.client_id.description=The Plaid client ID.
+datasources.section.source-plaid.plaid_env.description=The Plaid environment.
+datasources.section.source-plaid.start_date.description=The date from which you'd like to replicate data for Plaid in the format YYYY-MM-DD. All data generated after this date will be replicated.
+datasources.section.source-pokeapi.pokemon_name.title=Pokemon Name
+datasources.section.source-pokeapi.pokemon_name.description=Pokemon requested from the API.
+datasources.section.source-postgres.database.title=Database Name +datasources.section.source-postgres.host.title=Host +datasources.section.source-postgres.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-postgres.password.title=Password +datasources.section.source-postgres.port.title=Port +datasources.section.source-postgres.replication_method.oneOf.0.title=Standard +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.title=Initial Waiting Time in Seconds (Advanced) +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.title=Plugin +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.title=Publication +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.title=Replication Slot +datasources.section.source-postgres.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-postgres.replication_method.title=Replication Method +datasources.section.source-postgres.schemas.title=Schemas +datasources.section.source-postgres.ssl.title=Connect using SSL +datasources.section.source-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.source-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.source-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.source-postgres.ssl_mode.oneOf.3.title=require +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA Certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.source-postgres.ssl_mode.title=SSL Modes +datasources.section.source-postgres.username.title=Username +datasources.section.source-postgres.database.description=Name of the database. +datasources.section.source-postgres.host.description=Hostname of the database. +datasources.section.source-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-postgres.password.description=Password associated with the username. +datasources.section.source-postgres.port.description=Port of the database. +datasources.section.source-postgres.replication_method.description=Replication method for extracting data from the database. +datasources.section.source-postgres.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. 
+datasources.section.source-postgres.replication_method.oneOf.1.description=Logical replication uses the Postgres write-ahead log (WAL) to detect inserts, updates, and deletes. This needs to be configured on the source database itself. Only available on Postgres 10 and above. Read the docs. +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.description=The amount of time the connector will wait when it launches to determine if there is new data to sync or not. Defaults to 300 seconds. Valid range: 120 seconds to 1200 seconds. Read about initial waiting time. +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.description=A logical decoding plugin installed on the PostgreSQL server. The `pgoutput` plugin is used by default. If the replication table contains a lot of big jsonb values it is recommended to use `wal2json` plugin. Read more about selecting replication plugins. +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.description=A Postgres publication used for consuming changes. Read about publications and replication identities. +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.description=A plugin logical replication slot. Read about replication slots. +datasources.section.source-postgres.schemas.description=The list of schemas (case sensitive) to sync from. Defaults to public. +datasources.section.source-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.source-postgres.ssl_mode.description=SSL connection modes. +datasources.section.source-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.source-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. If you do not add it - the password will be generated automatically. +datasources.section.source-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. If you do not add it - the password will be generated automatically. +datasources.section.source-postgres.username.description=Username to access the database. +datasources.section.source-posthog.api_key.title=API Key +datasources.section.source-posthog.base_url.title=Base URL +datasources.section.source-posthog.start_date.title=Start Date +datasources.section.source-posthog.api_key.description=API Key. 
See the docs for information on how to generate this key. +datasources.section.source-posthog.base_url.description=Base PostHog url. Defaults to PostHog Cloud (https://app.posthog.com). +datasources.section.source-posthog.start_date.description=The date from which you'd like to replicate the data. Any data before this date will not be replicated. +datasources.section.source-prestashop.access_key.description=Your PrestaShop access key. See the docs for info on how to obtain this. +datasources.section.source-prestashop.url.description=Shop URL without trailing slash (domain name or IP address) +datasources.section.source-qualaroo.key.title=API key +datasources.section.source-qualaroo.start_date.title=Start Date +datasources.section.source-qualaroo.survey_ids.title=Qualaroo survey IDs +datasources.section.source-qualaroo.token.title=API token +datasources.section.source-qualaroo.key.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-qualaroo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-qualaroo.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated. +datasources.section.source-qualaroo.token.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-quickbooks-singer.client_id.title=Client ID +datasources.section.source-quickbooks-singer.client_secret.title=Client Secret +datasources.section.source-quickbooks-singer.realm_id.title=Realm ID +datasources.section.source-quickbooks-singer.refresh_token.title=Refresh Token +datasources.section.source-quickbooks-singer.sandbox.title=Sandbox +datasources.section.source-quickbooks-singer.start_date.title=Start Date +datasources.section.source-quickbooks-singer.user_agent.title=User Agent +datasources.section.source-quickbooks-singer.client_id.description=Identifies which app is making the request. Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.client_secret.description= Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.realm_id.description=Labeled Company ID. The Make API Calls panel is populated with the realm id and the current access token. +datasources.section.source-quickbooks-singer.refresh_token.description=A token used when refreshing the access token. +datasources.section.source-quickbooks-singer.sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-quickbooks-singer.start_date.description=The default value to use if no bookmark exists for an endpoint (rfc3339 date string). E.g, 2021-03-20T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-quickbooks-singer.user_agent.description=Process and email for API logging purposes. Example: tap-quickbooks . +datasources.section.source-recharge.access_token.title=Access Token +datasources.section.source-recharge.start_date.title=Start Date +datasources.section.source-recharge.access_token.description=The value of the Access Token generated. See the docs for more information. 
+datasources.section.source-recharge.start_date.description=The date from which you'd like to replicate data for Recharge API, in the format YYYY-MM-DDT00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-recurly.api_key.title=API Key
+datasources.section.source-recurly.api_key.description=Recurly API Key. See the docs for more information on how to generate this key.
+datasources.section.source-recurly.begin_time.description=ISO8601 timestamp from which the replication from the Recurly API will start.
+datasources.section.source-recurly.end_time.description=ISO8601 timestamp at which the replication from the Recurly API will stop. Records after that date won't be imported.
+datasources.section.source-redshift.database.title=Database
+datasources.section.source-redshift.host.title=Host
+datasources.section.source-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-redshift.password.title=Password
+datasources.section.source-redshift.port.title=Port
+datasources.section.source-redshift.schemas.title=Schemas
+datasources.section.source-redshift.username.title=Username
+datasources.section.source-redshift.database.description=Name of the database.
+datasources.section.source-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com).
+datasources.section.source-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-redshift.password.description=Password associated with the username.
+datasources.section.source-redshift.port.description=Port of the database.
+datasources.section.source-redshift.schemas.description=The list of schemas to sync from. Specify one or more explicitly or keep empty to process all schemas. Schema names are case sensitive.
+datasources.section.source-redshift.username.description=Username to use to access the database.
+datasources.section.source-retently.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-retently.credentials.oneOf.0.title=Authenticate via Retently (OAuth)
+datasources.section.source-retently.credentials.oneOf.1.properties.api_key.title=API Token
+datasources.section.source-retently.credentials.oneOf.1.title=Authenticate with API Token
+datasources.section.source-retently.credentials.title=Authentication Mechanism
+datasources.section.source-retently.credentials.description=Choose how to authenticate to Retently.
+datasources.section.source-retently.credentials.oneOf.0.properties.client_id.description=The Client ID of your Retently developer application.
+datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Retently developer application.
+datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.description=Retently Refresh Token which can be used to fetch new Bearer Tokens when the current one expires.
+datasources.section.source-retently.credentials.oneOf.1.properties.api_key.description=Retently API Token. See the docs for more information on how to obtain this key.
+datasources.section.source-rki-covid.start_date.title=Start Date +datasources.section.source-rki-covid.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-s3.dataset.title=Output Stream Name +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.title=Additional Reader Options +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.title=Advanced Options +datasources.section.source-s3.format.oneOf.0.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.0.properties.delimiter.title=Delimiter +datasources.section.source-s3.format.oneOf.0.properties.double_quote.title=Double Quote +datasources.section.source-s3.format.oneOf.0.properties.encoding.title=Encoding +datasources.section.source-s3.format.oneOf.0.properties.escape_char.title=Escape Character +datasources.section.source-s3.format.oneOf.0.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.title=Infer Datatypes +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.0.properties.quote_char.title=Quote Character +datasources.section.source-s3.format.oneOf.0.title=CSV +datasources.section.source-s3.format.oneOf.1.properties.batch_size.title=Record batch size +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.title=Buffer Size +datasources.section.source-s3.format.oneOf.1.properties.columns.title=Selected Columns +datasources.section.source-s3.format.oneOf.1.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.1.title=Parquet +datasources.section.source-s3.format.oneOf.2.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.2.title=Avro +datasources.section.source-s3.format.oneOf.3.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.3.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.title=UnexpectedFieldBehaviorEnum +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.title=Unexpected field behavior +datasources.section.source-s3.format.oneOf.3.title=Jsonl +datasources.section.source-s3.format.title=File Format +datasources.section.source-s3.path_pattern.title=Pattern of files to replicate +datasources.section.source-s3.provider.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-s3.provider.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-s3.provider.properties.bucket.title=Bucket +datasources.section.source-s3.provider.properties.endpoint.title=Endpoint +datasources.section.source-s3.provider.properties.path_prefix.title=Path Prefix +datasources.section.source-s3.provider.properties.use_ssl.title=Use TLS +datasources.section.source-s3.provider.properties.verify_ssl_cert.title=Verify TLS Certificates +datasources.section.source-s3.provider.title=S3: Amazon Web Services +datasources.section.source-s3.schema.title=Manually enforced data schema (Optional) +datasources.section.source-s3.dataset.description=The name of the stream you would like this source to output. Can contain letters, numbers, or underscores. 
+datasources.section.source-s3.format.description=The format of the files you'd like to replicate.
+datasources.section.source-s3.format.oneOf.0.description=This connector utilises PyArrow (Apache Arrow) for CSV parsing.
+datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.description=Optionally add a valid JSON string here to provide additional options to the CSV reader. Mappings must correspond to options detailed here. 'column_types' is used internally to handle the schema, so overriding that would likely cause problems.
+datasources.section.source-s3.format.oneOf.0.properties.advanced_options.description=Optionally add a valid JSON string here to provide additional PyArrow ReadOptions. Specify 'column_names' here if your CSV doesn't have a header, or if you want to use custom column names. 'block_size' and 'encoding' are already used above; specifying them again here will override the values above.
+datasources.section.source-s3.format.oneOf.0.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors.
+datasources.section.source-s3.format.oneOf.0.properties.delimiter.description=The character delimiting individual cells in the CSV data. This may only be a 1-character string. For tab-delimited data enter '\t'.
+datasources.section.source-s3.format.oneOf.0.properties.double_quote.description=Whether two quotes in a quoted CSV value denote a single quote in the data.
+datasources.section.source-s3.format.oneOf.0.properties.encoding.description=The character encoding of the CSV data. Leave blank to default to UTF8. See list of python encodings for allowable options.
+datasources.section.source-s3.format.oneOf.0.properties.escape_char.description=The character used for escaping special characters. To disallow escaping, leave this field blank.
+datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.description=Configures whether a schema for the source should be inferred from the current data or not. If set to false and a custom schema is set, then the manually enforced schema is used. If a schema is not manually set, and this is set to false, then all fields will be read as strings.
+datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.description=Whether newline characters are allowed in CSV values. Turning this on may affect performance. Leave blank to default to False.
+datasources.section.source-s3.format.oneOf.0.properties.quote_char.description=The character used for quoting CSV values. To disallow quoting, make this field blank.
+datasources.section.source-s3.format.oneOf.1.description=This connector utilises PyArrow (Apache Arrow) for Parquet parsing.
+datasources.section.source-s3.format.oneOf.1.properties.batch_size.description=Maximum number of records per batch read from the input files. Batches may be smaller if there aren’t enough rows in the file. This option can help avoid out-of-memory errors if your data is particularly wide.
+datasources.section.source-s3.format.oneOf.1.properties.buffer_size.description=Perform read buffering when deserializing individual column chunks. By default, every group column will be loaded fully into memory. This option can help avoid out-of-memory errors if your data is particularly wide.
+datasources.section.source-s3.format.oneOf.1.properties.columns.description=If you only want to sync a subset of the columns from the file(s), add the columns you want here as a comma-delimited list. Leave it empty to sync all columns. +datasources.section.source-s3.format.oneOf.2.description=This connector utilises fastavro for Avro parsing. +datasources.section.source-s3.format.oneOf.3.description=This connector uses PyArrow for JSON Lines (jsonl) file parsing. +datasources.section.source-s3.format.oneOf.3.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.description=Whether newline characters are allowed in JSON values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.description=An enumeration. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.description=How JSON fields outside of explicit_schema (if given) are treated. Check PyArrow documentation for details +datasources.section.source-s3.path_pattern.description=A regular expression which tells the connector which files to replicate. All files which match this pattern will be replicated. Use | to separate multiple patterns. See this page to understand pattern syntax (GLOBSTAR and SPLIT flags are enabled). Use pattern ** to pick up all files. +datasources.section.source-s3.provider.description=Use this to load files from S3 or S3-compatible services +datasources.section.source-s3.provider.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.bucket.description=Name of the S3 bucket where the file(s) exist. +datasources.section.source-s3.provider.properties.endpoint.description=Endpoint to an S3 compatible service. Leave empty to use AWS. +datasources.section.source-s3.provider.properties.path_prefix.description=By providing a path-like prefix (e.g. myFolder/thisTable/) under which all the relevant files sit, we can optimize finding these in S3. This is optional but recommended if your bucket contains many folders/files which you don't need to replicate. +datasources.section.source-s3.provider.properties.use_ssl.description=Whether the remote server is using a secure SSL/TLS connection. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.provider.properties.verify_ssl_cert.description=Set this to false to allow self signed certificates. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.schema.description=Optionally provide a schema to enforce, as a valid JSON string. Ensure this is a mapping of { "column" : "type" }, where types are valid JSON Schema datatypes. Leave as {} to auto-infer the schema. 
+datasources.section.source-salesloft.client_id.title=Client ID
+datasources.section.source-salesloft.client_secret.title=Client Secret
+datasources.section.source-salesloft.refresh_token.title=Refresh Token
+datasources.section.source-salesloft.start_date.title=Start Date
+datasources.section.source-salesloft.client_id.description=The Client ID of your Salesloft developer application.
+datasources.section.source-salesloft.client_secret.description=The Client Secret of your Salesloft developer application.
+datasources.section.source-salesloft.refresh_token.description=The token for obtaining a new access token.
+datasources.section.source-salesloft.start_date.description=The date from which you'd like to replicate data for Salesloft API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.
+datasources.section.source-search-metrics.api_key.title=API Key
+datasources.section.source-search-metrics.client_secret.title=Client Secret
+datasources.section.source-search-metrics.country_code.title=Country Code
+datasources.section.source-search-metrics.start_date.title=Start Date
+datasources.section.source-search-metrics.api_key.description=
+datasources.section.source-search-metrics.client_secret.description=
+datasources.section.source-search-metrics.country_code.description=The region of the S3 staging bucket to use if utilising a copy strategy.
+datasources.section.source-search-metrics.start_date.description=Data generated in SearchMetrics after this date will be replicated. This date must be specified in the format YYYY-MM-DDT00:00:00Z.
+datasources.section.source-sendgrid.apikey.title=Sendgrid API key
+datasources.section.source-sendgrid.start_time.title=Start time
+datasources.section.source-sendgrid.apikey.description=API Key, use admin to generate this key.
+datasources.section.source-sendgrid.start_time.description=Start time in timestamp integer format. Any data before this timestamp will not be replicated.
+datasources.section.source-sentry.auth_token.title=Authentication Tokens
+datasources.section.source-sentry.hostname.title=Host Name
+datasources.section.source-sentry.organization.title=Organization
+datasources.section.source-sentry.project.title=Project
+datasources.section.source-sentry.auth_token.description=Log into Sentry and then create authentication tokens. For self-hosted, you can find or create authentication tokens by visiting "{instance_url_prefix}/settings/account/api/auth-tokens/".
+datasources.section.source-sentry.hostname.description=Host name of the Sentry API server. For self-hosted, specify your host name here. Otherwise, leave it empty.
+datasources.section.source-sentry.organization.description=The slug of the organization the groups belong to.
+datasources.section.source-sentry.project.description=The name (slug) of the Project you want to sync.
+datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.title=Password +datasources.section.source-sftp.credentials.oneOf.0.title=Password Authentication +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.title=SSH Private Key +datasources.section.source-sftp.credentials.oneOf.1.title=SSH Key Authentication +datasources.section.source-sftp.credentials.title=Authentication * +datasources.section.source-sftp.file_pattern.title=File Pattern (Optional) +datasources.section.source-sftp.file_types.title=File types +datasources.section.source-sftp.folder_path.title=Folder Path (Optional) +datasources.section.source-sftp.host.title=Host Address +datasources.section.source-sftp.port.title=Port +datasources.section.source-sftp.user.title=User Name +datasources.section.source-sftp.credentials.description=The server authentication method +datasources.section.source-sftp.credentials.oneOf.0.properties.auth_method.description=Connect through password authentication +datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.description=OS-level password for logging into the jump server host +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_method.description=Connect through SSH key +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.description=OS-level user account SSH key credentials in RSA PEM format (created with ssh-keygen -t rsa -m PEM -f myuser_rsa) +datasources.section.source-sftp.file_pattern.description=The regular expression to specify files for sync in a chosen Folder Path +datasources.section.source-sftp.file_types.description=Comma-separated file types. Currently only 'csv' and 'json' types are supported. +datasources.section.source-sftp.folder_path.description=The directory to search files for sync +datasources.section.source-sftp.host.description=The server host address +datasources.section.source-sftp.port.description=The server port +datasources.section.source-sftp.user.description=The server user +datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-shopify.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.title=API Password +datasources.section.source-shopify.credentials.oneOf.1.title=API Password +datasources.section.source-shopify.credentials.title=Shopify Authorization Method +datasources.section.source-shopify.shop.title=Shopify Store +datasources.section.source-shopify.start_date.title=Replication Start Date +datasources.section.source-shopify.credentials.description=The authorization method to use to retrieve data from Shopify +datasources.section.source-shopify.credentials.oneOf.0.description=OAuth2.0 +datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.description=The Access Token for making authenticated requests. +datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.description=The Client ID of the Shopify developer application. +datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the Shopify developer application. 
+datasources.section.source-shopify.credentials.oneOf.1.description=API Password Auth +datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.description=The API Password for your private application in the `Shopify` store. +datasources.section.source-shopify.shop.description=The name of your Shopify store found in the URL. For example, if your URL was https://NAME.myshopify.com, then the name would be 'NAME'. +datasources.section.source-shopify.start_date.description=The date you would like to replicate data from. Format: YYYY-MM-DD. Any data before this date will not be replicated. +datasources.section.source-shortio.domain_id.title=Domain ID +datasources.section.source-shortio.secret_key.title=Secret Key +datasources.section.source-shortio.start_date.title=Start Date +datasources.section.source-shortio.secret_key.description=Short.io Secret Key +datasources.section.source-shortio.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-slack.channel_filter.title=Channel name filter +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.title=Access token +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-slack.credentials.oneOf.0.title=Sign in via Slack (OAuth) +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-slack.credentials.oneOf.1.title=API Token +datasources.section.source-slack.credentials.title=Authentication mechanism +datasources.section.source-slack.join_channels.title=Join all channels +datasources.section.source-slack.lookback_window.title=Threads Lookback window (Days) +datasources.section.source-slack.start_date.title=Start Date +datasources.section.source-slack.channel_filter.description=A channel name list (without leading '#' char) which limit the channels from which you'd like to sync. Empty list means no filter. +datasources.section.source-slack.credentials.description=Choose how to authenticate into Slack +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.description=Slack access_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.description=Slack client_id. See our docs if you need help finding this id. +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.description=Slack client_secret. See our docs if you need help finding this secret. +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.description=Slack refresh_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.description=A Slack bot token. See the docs for instructions on how to generate it. +datasources.section.source-slack.join_channels.description=Whether to join all channels or to sync data only from channels the bot is already in. If false, you'll need to manually add the bot to all the channels from which you'd like to sync messages. +datasources.section.source-slack.lookback_window.description=How far into the past to look for messages in threads. 
+datasources.section.source-slack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-smartsheets.access_token.title=Access Token +datasources.section.source-smartsheets.spreadsheet_id.title=Sheet ID +datasources.section.source-smartsheets.start_datetime.title=Start Datetime (Optional) +datasources.section.source-smartsheets.access_token.description=The access token to use for accessing your data from Smartsheets. This access token must be generated by a user with at least read access to the data you'd like to replicate. Generate an access token in the Smartsheets main menu by clicking Account > Apps & Integrations > API Access. See the setup guide for information on how to obtain this token. +datasources.section.source-smartsheets.spreadsheet_id.description=The spreadsheet ID. Find it by opening the spreadsheet then navigating to File > Properties +datasources.section.source-smartsheets.start_datetime.description=Only rows modified after this date/time will be replicated. This should be an ISO 8601 string, for instance: `2000-01-01T13:00:00` +datasources.section.source-snapchat-marketing.client_id.title=Client ID +datasources.section.source-snapchat-marketing.client_secret.title=Client Secret +datasources.section.source-snapchat-marketing.end_date.title=End Date (Optional) +datasources.section.source-snapchat-marketing.refresh_token.title=Refresh Token +datasources.section.source-snapchat-marketing.start_date.title=Start Date +datasources.section.source-snapchat-marketing.client_id.description=The Client ID of your Snapchat developer application. +datasources.section.source-snapchat-marketing.client_secret.description=The Client Secret of your Snapchat developer application. +datasources.section.source-snapchat-marketing.end_date.description=Date in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-snapchat-marketing.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-snapchat-marketing.start_date.description=Date in the format 2022-01-01. Any data before this date will not be replicated. 
+datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.title=Password +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.title=Username +datasources.section.source-snowflake.credentials.oneOf.1.title=Username and Password +datasources.section.source-snowflake.credentials.title=Authorization Method +datasources.section.source-snowflake.database.title=Database +datasources.section.source-snowflake.host.title=Account Name +datasources.section.source-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.source-snowflake.role.title=Role +datasources.section.source-snowflake.schema.title=Schema +datasources.section.source-snowflake.warehouse.title=Warehouse +datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.description=The Client ID of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.description=Refresh Token for making authenticated requests. +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.description=The password associated with the username. +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.description=The username you created to allow Airbyte to access the database. +datasources.section.source-snowflake.database.description=The database you created for Airbyte to access data. +datasources.section.source-snowflake.host.description=The host domain of the snowflake instance (must include the account, region, cloud environment, and end with snowflakecomputing.com). +datasources.section.source-snowflake.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-snowflake.role.description=The role you created for Airbyte to access Snowflake. +datasources.section.source-snowflake.schema.description=The source Snowflake schema tables. +datasources.section.source-snowflake.warehouse.description=The warehouse you created for Airbyte to access data. 
+datasources.section.source-square.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-square.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-square.credentials.oneOf.0.title=OAuth authentication +datasources.section.source-square.credentials.oneOf.1.properties.api_key.title=API key token +datasources.section.source-square.credentials.oneOf.1.title=API Key +datasources.section.source-square.credentials.title=Credential Type +datasources.section.source-square.include_deleted_objects.title=Include Deleted Objects +datasources.section.source-square.is_sandbox.title=Sandbox +datasources.section.source-square.start_date.title=Start Date +datasources.section.source-square.credentials.oneOf.0.properties.client_id.description=The Square-issued ID of your application +datasources.section.source-square.credentials.oneOf.0.properties.client_secret.description=The Square-issued application secret for your application +datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.description=A refresh token generated using the above client ID and secret +datasources.section.source-square.credentials.oneOf.1.properties.api_key.description=The API key for a Square application +datasources.section.source-square.include_deleted_objects.description=In some streams there is an option to include deleted objects (Items, Categories, Discounts, Taxes) +datasources.section.source-square.is_sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-square.start_date.description=UTC date in the format YYYY-MM-DD. Any data before this date will not be replicated. If not set, all data will be replicated. +datasources.section.source-strava.athlete_id.title=Athlete ID +datasources.section.source-strava.client_id.title=Client ID +datasources.section.source-strava.client_secret.title=Client Secret +datasources.section.source-strava.refresh_token.title=Refresh Token +datasources.section.source-strava.start_date.title=Start Date +datasources.section.source-strava.athlete_id.description=The Athlete ID of your Strava developer application. +datasources.section.source-strava.client_id.description=The Client ID of your Strava developer application. +datasources.section.source-strava.client_secret.description=The Client Secret of your Strava developer application. +datasources.section.source-strava.refresh_token.description=The Refresh Token with the activity: read_all permissions. +datasources.section.source-strava.start_date.description=UTC date and time. Any data before this date will not be replicated. +datasources.section.source-surveymonkey.access_token.title=Access Token +datasources.section.source-surveymonkey.start_date.title=Start Date +datasources.section.source-surveymonkey.survey_ids.title=Survey Monkey survey IDs +datasources.section.source-surveymonkey.access_token.description=Access Token for making authenticated requests. See the docs for information on how to generate this key. +datasources.section.source-surveymonkey.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-surveymonkey.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated. 
+datasources.section.source-talkdesk-explore.api_key.title=API KEY +datasources.section.source-talkdesk-explore.auth_url.title=AUTH URL +datasources.section.source-talkdesk-explore.start_date.title=START DATE +datasources.section.source-talkdesk-explore.timezone.title=TIMEZONE +datasources.section.source-talkdesk-explore.api_key.description=Talkdesk API key. +datasources.section.source-talkdesk-explore.auth_url.description=Talkdesk Auth URL. Only 'client_credentials' auth type supported at the moment. +datasources.section.source-talkdesk-explore.start_date.description=The date from which you'd like to replicate data for Talkdesk Explore API, in the format YYYY-MM-DDT00:00:00. All data generated after this date will be replicated. +datasources.section.source-talkdesk-explore.timezone.description=Timezone to use when generating reports. Only IANA timezones are supported (https://nodatime.org/TimeZones) +datasources.section.source-tempo.api_token.title=API token +datasources.section.source-tempo.api_token.description=Tempo API Token. Go to Tempo>Settings, scroll down to Data Access and select API integration. +datasources.section.source-tidb.database.title=Database +datasources.section.source-tidb.host.title=Host +datasources.section.source-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.source-tidb.password.title=Password +datasources.section.source-tidb.port.title=Port +datasources.section.source-tidb.ssl.title=SSL Connection +datasources.section.source-tidb.username.title=Username +datasources.section.source-tidb.database.description=Name of the database. +datasources.section.source-tidb.host.description=Hostname of the database. +datasources.section.source-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3) +datasources.section.source-tidb.password.description=Password associated with the username. +datasources.section.source-tidb.port.description=Port of the database. +datasources.section.source-tidb.ssl.description=Encrypt data using SSL. +datasources.section.source-tidb.username.description=Username to use to access the database. +datasources.section.source-timely.account_id.title=account_id +datasources.section.source-timely.bearer_token.title=Bearer token +datasources.section.source-timely.start_date.title=startDate +datasources.section.source-timely.account_id.description=Timely account id +datasources.section.source-timely.bearer_token.description=Timely bearer token +datasources.section.source-timely.start_date.description=start date +datasources.section.source-tplcentral.client_id.title=Client ID +datasources.section.source-tplcentral.client_secret.title=Client secret +datasources.section.source-tplcentral.customer_id.title=Customer ID +datasources.section.source-tplcentral.facility_id.title=Facility ID +datasources.section.source-tplcentral.start_date.title=Start date +datasources.section.source-tplcentral.tpl_key.title=3PL GUID +datasources.section.source-tplcentral.url_base.title=URL base +datasources.section.source-tplcentral.user_login.title=User login name +datasources.section.source-tplcentral.user_login_id.title=User login ID +datasources.section.source-tplcentral.start_date.description=Date and time together in RFC 3339 format, for example, 2018-11-13T20:20:39+00:00. 
+datasources.section.source-tplcentral.user_login.description=User login ID and/or name is required +datasources.section.source-tplcentral.user_login_id.description=User login ID and/or name is required +datasources.section.source-trello.board_ids.title=Trello Board IDs +datasources.section.source-trello.key.title=API key +datasources.section.source-trello.start_date.title=Start Date +datasources.section.source-trello.token.title=API token +datasources.section.source-trello.board_ids.description=IDs of the boards to replicate data from. If left empty, data from all boards to which you have access will be replicated. +datasources.section.source-trello.key.description=Trello API key. See the docs for instructions on how to generate it. +datasources.section.source-trello.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-trello.token.description=Trello v API token. See the docs for instructions on how to generate it. +datasources.section.source-twilio.account_sid.title=Account ID +datasources.section.source-twilio.auth_token.title=Auth Token +datasources.section.source-twilio.lookback_window.title=Lookback window +datasources.section.source-twilio.start_date.title=Replication Start Date +datasources.section.source-twilio.account_sid.description=Twilio account SID +datasources.section.source-twilio.auth_token.description=Twilio Auth Token. +datasources.section.source-twilio.lookback_window.description=How far into the past to look for records. (in minutes) +datasources.section.source-twilio.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-typeform.form_ids.title=Form IDs to replicate +datasources.section.source-typeform.start_date.title=Start Date +datasources.section.source-typeform.token.title=API Token +datasources.section.source-typeform.form_ids.description=When this parameter is set, the connector will replicate data only from the input forms. Otherwise, all forms in your Typeform account will be replicated. You can find form IDs in your form URLs. For example, in the URL "https://mysite.typeform.com/to/u6nXL7" the form_id is u6nXL7. You can find form URLs on Share panel +datasources.section.source-typeform.start_date.description=UTC date and time in the format: YYYY-MM-DDTHH:mm:ss[Z]. Any data before this date will not be replicated. +datasources.section.source-typeform.token.description=The API Token for a Typeform account. +datasources.section.source-us-census.api_key.description=Your API Key. Get your key here. +datasources.section.source-us-census.query_params.description=The query parameters portion of the GET request, without the api key +datasources.section.source-us-census.query_path.description=The path portion of the GET request +datasources.section.source-woocommerce.conversion_window_days.title=Conversion Window (Optional) +datasources.section.source-woocommerce.api_key.description=The CUSTOMER KEY for API in WooCommerce shop. +datasources.section.source-woocommerce.api_secret.description=The CUSTOMER SECRET for API in WooCommerce shop. +datasources.section.source-woocommerce.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. +datasources.section.source-woocommerce.shop.description=The name of the store. 
For https://EXAMPLE.com, the shop name is 'EXAMPLE.com'. +datasources.section.source-woocommerce.start_date.description=The date from which you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-yahoo-finance-price.interval.title=Interval +datasources.section.source-yahoo-finance-price.range.title=Range +datasources.section.source-yahoo-finance-price.interval.description=The interval between prices queried. +datasources.section.source-yahoo-finance-price.range.description=The range of prices to be queried. +datasources.section.source-yahoo-finance-price.tickers.description=Comma-separated identifiers for the stocks to be queried. Whitespaces are allowed. +datasources.section.source-youtube-analytics.credentials.properties.client_id.title=Client ID +datasources.section.source-youtube-analytics.credentials.properties.client_secret.title=Client Secret +datasources.section.source-youtube-analytics.credentials.properties.refresh_token.title=Refresh Token +datasources.section.source-youtube-analytics.credentials.title=Authenticate via OAuth 2.0 +datasources.section.source-youtube-analytics.credentials.properties.client_id.description=The Client ID of your developer application +datasources.section.source-youtube-analytics.credentials.properties.client_secret.description=The client secret of your developer application +datasources.section.source-youtube-analytics.credentials.properties.refresh_token.description=A refresh token generated using the above client ID and secret +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-zendesk-chat.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-zendesk-chat.credentials.oneOf.1.title=Access Token +datasources.section.source-zendesk-chat.credentials.title=Authorization Method +datasources.section.source-zendesk-chat.start_date.title=Start Date +datasources.section.source-zendesk-chat.subdomain.title=Subdomain (Optional) +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain a new Access Token when it expires. +datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests. +datasources.section.source-zendesk-chat.start_date.description=The date from which you'd like to replicate data for Zendesk Chat API, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-zendesk-chat.subdomain.description=Required if you access Zendesk Chat from a Zendesk Support subdomain. 
+datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-sunshine.credentials.title=Authorization Method +datasources.section.source-zendesk-sunshine.start_date.title=Start Date +datasources.section.source-zendesk-sunshine.subdomain.title=Subdomain +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.description=Long-term access Token for making authenticated requests. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.description=API Token. See the docs for information on how to generate this key. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account +datasources.section.source-zendesk-sunshine.start_date.description=The date from which you'd like to replicate data for Zendesk Sunshine API, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-zendesk-sunshine.subdomain.description=The subdomain for your Zendesk Account. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-support.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-support.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-support.credentials.title=Authentication * +datasources.section.source-zendesk-support.start_date.title=Start Date +datasources.section.source-zendesk-support.subdomain.title=Subdomain +datasources.section.source-zendesk-support.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-support.start_date.description=The date from which you'd like to replicate data for Zendesk Support API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. 
+datasources.section.source-zendesk-support.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.title=Email +datasources.section.source-zendesk-talk.credentials.oneOf.0.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.title=OAuth2.0 +datasources.section.source-zendesk-talk.credentials.title=Authentication +datasources.section.source-zendesk-talk.start_date.title=Start Date +datasources.section.source-zendesk-talk.subdomain.title=Subdomain +datasources.section.source-zendesk-talk.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.start_date.description=The date from which you'd like to replicate data for Zendesk Talk API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-zendesk-talk.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zenloop.api_token.description=Zenloop API Token. You can get the API token in the settings page here +datasources.section.source-zenloop.date_from.description=Zenloop date_from. Format: 2021-10-24T03:30:30Z or 2021-10-24. Leave empty if only data from the current date should be synced +datasources.section.source-zenloop.survey_group_id.description=Zenloop Survey Group ID. Can be found by pulling All Survey Groups via SurveyGroups stream. Leave empty to pull answers from all survey groups +datasources.section.source-zenloop.survey_id.description=Zenloop Survey ID. Can be found here. Leave empty to pull answers from all surveys +datasources.section.source-zoho-crm.client_id.title=Client ID +datasources.section.source-zoho-crm.client_secret.title=Client Secret +datasources.section.source-zoho-crm.dc_region.title=Data Center Location +datasources.section.source-zoho-crm.edition.title=Zoho CRM Edition +datasources.section.source-zoho-crm.environment.title=Environment +datasources.section.source-zoho-crm.refresh_token.title=Refresh Token +datasources.section.source-zoho-crm.start_datetime.title=Start Date +datasources.section.source-zoho-crm.client_id.description=OAuth2.0 Client ID +datasources.section.source-zoho-crm.client_secret.description=OAuth2.0 Client Secret +datasources.section.source-zoho-crm.dc_region.description=Please choose the region of your Data Center location. 
More info by this Link +datasources.section.source-zoho-crm.edition.description=Choose your Edition of Zoho CRM to determine API Concurrency Limits +datasources.section.source-zoho-crm.environment.description=Please choose the environment +datasources.section.source-zoho-crm.refresh_token.description=OAuth2.0 Refresh Token +datasources.section.source-zoho-crm.start_datetime.description=ISO 8601, for instance: `YYYY-MM-DD`, `YYYY-MM-DD HH:MM:SS+HH:MM` +datasources.section.source-zoom-singer.jwt.title=JWT Token +datasources.section.source-zoom-singer.jwt.description=Zoom JWT Token. See the docs for more information on how to obtain this key. +datasources.section.source-zuora.client_id.title=Client ID +datasources.section.source-zuora.client_secret.title=Client Secret +datasources.section.source-zuora.data_query.title=Data Query Type +datasources.section.source-zuora.start_date.title=Start Date +datasources.section.source-zuora.tenant_endpoint.title=Tenant Endpoint Location +datasources.section.source-zuora.window_in_days.title=Query Window (in days) +datasources.section.source-zuora.client_id.description=Your OAuth user Client ID +datasources.section.source-zuora.client_secret.description=Your OAuth user Client Secret +datasources.section.source-zuora.data_query.description=Choose between `Live` or `Unlimited` - the optimized, replicated database at 12-hour freshness for high-volume extraction. Link +datasources.section.source-zuora.start_date.description=Start Date in format: YYYY-MM-DD +datasources.section.source-zuora.tenant_endpoint.description=Please choose the right endpoint where your Tenant is located. More info by this Link +datasources.section.source-zuora.window_in_days.description=The number of days for each data chunk, beginning from start_date. The bigger the value, the faster the fetch. (0.1 for a couple of hours, 1 for a day; 364 for a year). +datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID +datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key +datasources.section.destination-amazon-sqs.message_delay.title=Message Delay +datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id +datasources.section.destination-amazon-sqs.queue_url.title=Queue URL +datasources.section.destination-amazon-sqs.region.title=AWS Region +datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key +datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages +datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body. +datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds). +datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues. 
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue +datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue +datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages +datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id +datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name +datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn +datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User +datasources.section.destination-aws-datalake.credentials.title=Authentication mode +datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name +datasources.section.destination-aws-datalake.region.title=AWS Region +datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id +datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket +datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix +datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS. 
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3 +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key +datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials +datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use +datasources.section.destination-aws-datalake.region.description=Region name +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes) +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values +datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON +datasources.section.destination-azure-blob-storage.format.title=Output Format +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key. +datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account. +datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. May be left empty; in that case a container named airbytecontainer+timestamp will be created automatically. +datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the Microsoft native endpoint. +datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure. +datasources.section.destination-azure-blob-storage.format.description=Output data format +datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. 
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery.dataset_location.title=Dataset Location +datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery.loading_method.title=Loading Method +datasources.section.destination-bigquery.project_id.title=Project ID +datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional) +datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here. +datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. +datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-bigquery.transformation_priority.description=Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don’t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly. 
+datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional) +datasources.section.destination-bigquery-denormalized.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source) +datasources.section.destination-bigquery-denormalized.dataset_id.title=Default Dataset ID +datasources.section.destination-bigquery-denormalized.dataset_location.title=Dataset Location (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.0.title=Standard Inserts +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.title=Credential +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional) +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.title=GCS Staging +datasources.section.destination-bigquery-denormalized.loading_method.title=Loading Method * +datasources.section.destination-bigquery-denormalized.project_id.title=Project ID +datasources.section.destination-bigquery-denormalized.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-bigquery-denormalized.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here. +datasources.section.destination-bigquery-denormalized.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
    Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging.
    GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.description=HMAC key access ID. When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_name.description=The name of the GCS bucket. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.gcs_bucket_path.description=Directory under the GCS bucket where data will be written. Read more here. +datasources.section.destination-bigquery-denormalized.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.description=This upload method temporarily stores records in a GCS bucket. With this option you can choose whether these records should be removed from GCS when the migration has finished. The default "Delete all tmp files from GCS" value is used if not set explicitly. +datasources.section.destination-bigquery-denormalized.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. Read more here. +datasources.section.destination-cassandra.address.title=Address +datasources.section.destination-cassandra.datacenter.title=Datacenter +datasources.section.destination-cassandra.keyspace.title=Keyspace +datasources.section.destination-cassandra.password.title=Password +datasources.section.destination-cassandra.port.title=Port +datasources.section.destination-cassandra.replication.title=Replication factor +datasources.section.destination-cassandra.username.title=Username +datasources.section.destination-cassandra.address.description=Address to connect to. +datasources.section.destination-cassandra.datacenter.description=Datacenter of the Cassandra cluster. +datasources.section.destination-cassandra.keyspace.description=Default Cassandra keyspace to create data in. +datasources.section.destination-cassandra.password.description=Password associated with Cassandra. +datasources.section.destination-cassandra.port.description=Port of Cassandra. +datasources.section.destination-cassandra.replication.description=Indicates how many nodes the data should be replicated to. +datasources.section.destination-cassandra.username.description=Username to use to access Cassandra. 
+datasources.section.destination-clickhouse.database.title=DB Name +datasources.section.destination-clickhouse.host.title=Host +datasources.section.destination-clickhouse.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-clickhouse.password.title=Password +datasources.section.destination-clickhouse.port.title=Port +datasources.section.destination-clickhouse.ssl.title=SSL Connection +datasources.section.destination-clickhouse.tcp-port.title=Native Port +datasources.section.destination-clickhouse.username.title=User +datasources.section.destination-clickhouse.database.description=Name of the database. +datasources.section.destination-clickhouse.host.description=Hostname of the database. +datasources.section.destination-clickhouse.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-clickhouse.password.description=Password associated with the username. +datasources.section.destination-clickhouse.port.description=JDBC port (not the native port) of the database. +datasources.section.destination-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.destination-clickhouse.tcp-port.description=Native port (not the JDBC) of the database. +datasources.section.destination-clickhouse.username.description=Username to use to access the database. +datasources.section.destination-csv.destination_path.description=Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-databricks.accept_terms.title=Agree to the Databricks JDBC Driver Terms & Conditions +datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.title=S3 Access Key ID +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.title=S3 Bucket Path +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.title=S3 Secret Access Key +datasources.section.destination-databricks.data_source.oneOf.0.title=Amazon S3 +datasources.section.destination-databricks.data_source.title=Data Source +datasources.section.destination-databricks.database_schema.title=Database Schema +datasources.section.destination-databricks.databricks_http_path.title=HTTP Path +datasources.section.destination-databricks.databricks_personal_access_token.title=Access Token +datasources.section.destination-databricks.databricks_port.title=Port +datasources.section.destination-databricks.databricks_server_hostname.title=Server Hostname +datasources.section.destination-databricks.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-databricks.accept_terms.description=You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector. +datasources.section.destination-databricks.data_source.description=Storage on which the delta lake is built. 
+datasources.section.destination-databricks.data_source.oneOf.0.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_access_key_id.description=The Access Key Id granting access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_name.description=The name of the S3 bucket to use for intermittent staging of the data. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.destination-databricks.data_source.oneOf.0.properties.s3_secret_access_key.description=The corresponding secret to the above access key id. +datasources.section.destination-databricks.database_schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-databricks.databricks_http_path.description=Databricks Cluster HTTP Path. +datasources.section.destination-databricks.databricks_personal_access_token.description=Databricks Personal Access Token for making authenticated requests. +datasources.section.destination-databricks.databricks_port.description=Databricks Cluster Port. +datasources.section.destination-databricks.databricks_server_hostname.description=Databricks Cluster Server Hostname. +datasources.section.destination-databricks.purge_staging_data.description=Defaults to 'true'. Switch it to 'false' for debugging purposes. +datasources.section.destination-dynamodb.access_key_id.title=DynamoDB Key Id +datasources.section.destination-dynamodb.dynamodb_endpoint.title=Endpoint +datasources.section.destination-dynamodb.dynamodb_region.title=DynamoDB Region +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.title=Table name prefix +datasources.section.destination-dynamodb.secret_access_key.title=DynamoDB Access Key +datasources.section.destination-dynamodb.access_key_id.description=The access key id to access DynamoDB. Airbyte requires Read and Write permissions to DynamoDB. +datasources.section.destination-dynamodb.dynamodb_endpoint.description=This is your DynamoDB endpoint URL. (If you are working with AWS DynamoDB, just leave it empty.) +datasources.section.destination-dynamodb.dynamodb_region.description=The region of the DynamoDB. +datasources.section.destination-dynamodb.dynamodb_table_name_prefix.description=The prefix to use when naming DynamoDB tables. +datasources.section.destination-dynamodb.secret_access_key.description=The corresponding secret to the access key id. 
+datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.title=None +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password +datasources.section.destination-elasticsearch.authenticationMethod.title=Authentication Method +datasources.section.destination-elasticsearch.endpoint.title=Server Endpoint +datasources.section.destination-elasticsearch.upsert.title=Upsert Records +datasources.section.destination-elasticsearch.authenticationMethod.description=The type of authentication to be used. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The key ID to use when accessing an enterprise Elasticsearch instance. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server. +datasources.section.destination-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server. +datasources.section.destination-elasticsearch.endpoint.description=The full URL of the Elasticsearch server. +datasources.section.destination-elasticsearch.upsert.description=If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the Elasticsearch document ID. Does not support composite primary keys. 
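The API key/secret option above corresponds to Elasticsearch's `ApiKey` authorization header, built from the base64 encoding of `id:secret`. A minimal sketch of such a request with placeholder endpoint and credentials (not the connector's internal code):

```
# $API_KEY_ID and $API_KEY_SECRET are hypothetical placeholders
curl -H "Authorization: ApiKey $(printf '%s:%s' "$API_KEY_ID" "$API_KEY_SECRET" | base64)" \
  "https://es.example.com:9200/_cluster/health"
```
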
+datasources.section.destination-firebolt.account.title=Account +datasources.section.destination-firebolt.database.title=Database +datasources.section.destination-firebolt.engine.title=Engine +datasources.section.destination-firebolt.host.title=Host +datasources.section.destination-firebolt.loading_method.oneOf.0.title=SQL Inserts +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.title=AWS Key ID +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.title=AWS Key Secret +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.title=S3 bucket name +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.title=S3 region name +datasources.section.destination-firebolt.loading_method.oneOf.1.title=External Table via S3 +datasources.section.destination-firebolt.loading_method.title=Loading Method +datasources.section.destination-firebolt.password.title=Password +datasources.section.destination-firebolt.username.title=Username +datasources.section.destination-firebolt.account.description=Firebolt account to log in to. +datasources.section.destination-firebolt.database.description=The database to connect to. +datasources.section.destination-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.destination-firebolt.host.description=The host name of your Firebolt database. +datasources.section.destination-firebolt.loading_method.description=Loading method used to select the way data will be uploaded to Firebolt. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_id.description=AWS access key granting read and write access to S3. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.aws_key_secret.description=Corresponding secret part of the AWS key. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_bucket.description=The name of the S3 bucket. +datasources.section.destination-firebolt.loading_method.oneOf.1.properties.s3_region.description=Region name of the S3 bucket. +datasources.section.destination-firebolt.password.description=Firebolt password. +datasources.section.destination-firebolt.username.description=Firebolt email address you use to log in. +datasources.section.destination-firestore.credentials_json.title=Credentials JSON +datasources.section.destination-firestore.project_id.title=Project ID +datasources.section.destination-firestore.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty. +datasources.section.destination-firestore.project_id.description=The GCP project ID for the project containing the target Firestore database. 
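The Firestore `credentials_json` field above expects the full contents of a GCP service account key file; a redacted sketch of its general shape, with placeholder values:

```
{
  "type": "service_account",
  "project_id": "my-project",
  "private_key_id": "...",
  "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
  "client_email": "airbyte@my-project.iam.gserviceaccount.com",
  "token_uri": "https://oauth2.googleapis.com/token"
}
```
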
+datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.title=Access ID +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.title=Secret +datasources.section.destination-gcs.credential.oneOf.0.title=HMAC Key +datasources.section.destination-gcs.credential.title=Authentication +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum (Optional) +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.title=Compression Codec +datasources.section.destination-gcs.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.title=Normalization (Optional) +datasources.section.destination-gcs.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-gcs.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-gcs.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-gcs.format.oneOf.2.title=JSON Lines: newline-delimited JSON +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-gcs.format.oneOf.3.title=Parquet: Columnar Storage +datasources.section.destination-gcs.format.title=Output Format +datasources.section.destination-gcs.gcs_bucket_name.title=GCS Bucket 
Name +datasources.section.destination-gcs.gcs_bucket_path.title=GCS Bucket Path +datasources.section.destination-gcs.gcs_bucket_region.title=GCS Bucket Region (Optional) +datasources.section.destination-gcs.credential.description=An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_access_id.description=When linked to a service account, this ID is 61 characters long; when linked to a user account, it is 24 characters long. Read more here. +datasources.section.destination-gcs.credential.oneOf.0.properties.hmac_key_secret.description=The corresponding secret for the access ID. It is a 40-character base-64 encoded string. Read more here. +datasources.section.destination-gcs.format.description=Output data format. One of the following formats must be selected: AVRO format, PARQUET format, CSV format, or JSONL format. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=The presets 0-3 are fast presets with medium compression. The presets 4-6 are fairly slow presets with high compression. The default preset is 6. The presets 7-9 are like the preset 6 but use bigger dictionaries and have higher compressor and decompressor memory requirements. Unless the uncompressed size of the file exceeds 8 MiB, 16 MiB, or 32 MiB, it is a waste of memory to use the presets 7, 8, or 9, respectively. Read more here for details. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory. +datasources.section.destination-gcs.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block. +datasources.section.destination-gcs.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz"). +datasources.section.destination-gcs.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details. +datasources.section.destination-gcs.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz"). +datasources.section.destination-gcs.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages. 
+datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_encoding.description=Default: true. +datasources.section.destination-gcs.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for the dictionary. Default: 1024 KB. +datasources.section.destination-gcs.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB. +datasources.section.destination-gcs.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB. +datasources.section.destination-gcs.gcs_bucket_name.description=You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here. +datasources.section.destination-gcs.gcs_bucket_path.description=Subdirectory under the above bucket to sync the data into. +datasources.section.destination-gcs.gcs_bucket_region.description=Select a Region of the GCS Bucket. Read more here. +datasources.section.destination-google-sheets.credentials.properties.client_id.title=Client ID +datasources.section.destination-google-sheets.credentials.properties.client_secret.title=Client Secret +datasources.section.destination-google-sheets.credentials.properties.refresh_token.title=Refresh Token +datasources.section.destination-google-sheets.credentials.title=* Authentication via Google (OAuth) +datasources.section.destination-google-sheets.spreadsheet_id.title=Spreadsheet Link +datasources.section.destination-google-sheets.credentials.description=Google API Credentials for connecting to Google Sheets and Google Drive APIs. +datasources.section.destination-google-sheets.credentials.properties.client_id.description=The Client ID of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.client_secret.description=The Client Secret of your Google Sheets developer application. +datasources.section.destination-google-sheets.credentials.properties.refresh_token.description=The token used to obtain a new access token. +datasources.section.destination-google-sheets.spreadsheet_id.description=The link to your spreadsheet. See this guide for more details. +datasources.section.destination-jdbc.jdbc_url.title=JDBC URL +datasources.section.destination-jdbc.password.title=Password +datasources.section.destination-jdbc.schema.title=Default Schema +datasources.section.destination-jdbc.username.title=Username +datasources.section.destination-jdbc.jdbc_url.description=JDBC-formatted URL. See the standard here. +datasources.section.destination-jdbc.password.description=The password associated with this username. +datasources.section.destination-jdbc.schema.description=If you leave the schema unspecified, JDBC defaults to a schema named "public". +datasources.section.destination-jdbc.username.description=The username which is used to access the database. 
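For the JDBC URL above, and for the recurring `jdbc_url_params` fields on the other JDBC-based destinations, the extra properties are appended to the URL's query string as `key=value` pairs joined by `&`. An illustrative example (host, database, and parameter values are placeholders, shown here with common PostgreSQL driver options):

```
jdbc:postgresql://db.example.com:5432/airbyte?ssl=true&loginTimeout=30
```
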
+datasources.section.destination-kafka.acks.title=ACKs +datasources.section.destination-kafka.batch_size.title=Batch Size +datasources.section.destination-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.destination-kafka.buffer_memory.title=Buffer Memory +datasources.section.destination-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.destination-kafka.client_id.title=Client ID +datasources.section.destination-kafka.compression_type.title=Compression Type +datasources.section.destination-kafka.delivery_timeout_ms.title=Delivery Timeout +datasources.section.destination-kafka.enable_idempotence.title=Enable Idempotence +datasources.section.destination-kafka.linger_ms.title=Linger ms +datasources.section.destination-kafka.max_block_ms.title=Max Block ms +datasources.section.destination-kafka.max_in_flight_requests_per_connection.title=Max in Flight Requests per Connection +datasources.section.destination-kafka.max_request_size.title=Max Request Size +datasources.section.destination-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.destination-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.destination-kafka.protocol.title=Protocol +datasources.section.destination-kafka.receive_buffer_bytes.title=Receive Buffer bytes +datasources.section.destination-kafka.request_timeout_ms.title=Request Timeout +datasources.section.destination-kafka.retries.title=Retries +datasources.section.destination-kafka.send_buffer_bytes.title=Send Buffer bytes +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.title=Socket Connection Setup Max Timeout +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.title=Socket Connection Setup Timeout +datasources.section.destination-kafka.sync_producer.title=Sync Producer +datasources.section.destination-kafka.test_topic.title=Test Topic +datasources.section.destination-kafka.topic_pattern.title=Topic Pattern +datasources.section.destination-kafka.acks.description=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. +datasources.section.destination-kafka.batch_size.description=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. +datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. +datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer. +datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns. +datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. +datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request. +datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. +datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5. +datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes. +datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. 
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. +datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used. +datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. +datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established. +datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka. +datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-keen.api_key.title=API Key +datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp +datasources.section.destination-keen.project_id.title=Project ID +datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data. +datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section. +datasources.section.destination-kinesis.accessKey.title=Access Key +datasources.section.destination-kinesis.bufferSize.title=Buffer Size +datasources.section.destination-kinesis.endpoint.title=Endpoint +datasources.section.destination-kinesis.privateKey.title=Private Key +datasources.section.destination-kinesis.region.title=Region +datasources.section.destination-kinesis.shardCount.title=Shard Count +datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user. +datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed. +datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint. +datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase". 
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you. +datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed. +datasources.section.destination-kvdb.bucket_id.title=Bucket ID +datasources.section.destination-kvdb.secret_key.title=Secret Key +datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket. +datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key. +datasources.section.destination-local-json.destination_path.title=Destination Path +datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs +datasources.section.destination-mariadb-columnstore.database.title=Database +datasources.section.destination-mariadb-columnstore.host.title=Host +datasources.section.destination-mariadb-columnstore.password.title=Password +datasources.section.destination-mariadb-columnstore.port.title=Port +datasources.section.destination-mariadb-columnstore.username.title=Username +datasources.section.destination-mariadb-columnstore.database.description=Name of the database. +datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database. +datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username. +datasources.section.destination-mariadb-columnstore.port.description=The Port of the database. +datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database. +datasources.section.destination-meilisearch.api_key.title=API Key +datasources.section.destination-meilisearch.host.title=Host +datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key. +datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance. 
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User +datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password +datasources.section.destination-mongodb.auth_type.title=Authorization type +datasources.section.destination-mongodb.database.title=DB Name +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDB Instance +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses +datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.destination-mongodb.instance_type.title=MongoDB Instance Type +datasources.section.destination-mongodb.auth_type.description=Authorization type. +datasources.section.destination-mongodb.auth_type.oneOf.0.description=None. +datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username. +datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database. +datasources.section.destination-mongodb.database.description=Name of the database. +datasources.section.destination-mongodb.instance_type.description=MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated. +datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see documentation. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name. +datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member, separated by a comma. +datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to. 
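For the replica set option above, `server_addresses` is a comma-separated list of `host:port` members; an illustrative value with placeholder hostnames:

```
mongo1.example.com:27017,mongo2.example.com:27017,mongo3.example.com:27017
```
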
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect +datasources.section.destination-mqtt.broker_host.title=MQTT broker host +datasources.section.destination-mqtt.broker_port.title=MQTT broker port +datasources.section.destination-mqtt.clean_session.title=Clean session +datasources.section.destination-mqtt.client.title=Client ID +datasources.section.destination-mqtt.connect_timeout.title=Connect timeout +datasources.section.destination-mqtt.message_qos.title=Message QoS +datasources.section.destination-mqtt.message_retained.title=Message retained +datasources.section.destination-mqtt.password.title=Password +datasources.section.destination-mqtt.publisher_sync.title=Sync publisher +datasources.section.destination-mqtt.topic_pattern.title=Topic pattern +datasources.section.destination-mqtt.topic_test.title=Test topic +datasources.section.destination-mqtt.use_tls.title=Use TLS +datasources.section.destination-mqtt.username.title=Username +datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost. +datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to. +datasources.section.destination-mqtt.broker_port.description=Port of the broker. +datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects. +datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to. +datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established. +datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered. +datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine. +datasources.section.destination-mqtt.password.description=Password to use for the connection. +datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker. +datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-mqtt.username.description=User name to use for the connection. 
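The `{namespace}` and `{stream}` placeholders in the MQTT topic pattern above are substituted per record (the final topic name may additionally be normalized to a standard naming convention). A purely illustrative resolution, with hypothetical pattern and stream names:

```
# topic_pattern: airbyte/{namespace}/{stream}
# stream "users" in namespace "public" is published to:
airbyte/public/users
```
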
+datasources.section.destination-mssql.database.title=DB Name +datasources.section.destination-mssql.host.title=Host +datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mssql.password.title=Password +datasources.section.destination-mssql.port.title=Port +datasources.section.destination-mssql.schema.title=Default Schema +datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.destination-mssql.ssl_method.title=SSL Method +datasources.section.destination-mssql.username.title=User +datasources.section.destination-mssql.database.description=The name of the MSSQL database. +datasources.section.destination-mssql.host.description=The host name of the MSSQL database. +datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mssql.password.description=The password associated with this username. +datasources.section.destination-mssql.port.description=The port of the MSSQL database. +datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database. +datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted. +datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.destination-mssql.username.description=The username which is used to access the database. +datasources.section.destination-mysql.database.title=DB Name +datasources.section.destination-mysql.host.title=Host +datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-mysql.password.title=Password +datasources.section.destination-mysql.port.title=Port +datasources.section.destination-mysql.ssl.title=SSL Connection +datasources.section.destination-mysql.username.title=User +datasources.section.destination-mysql.database.description=Name of the database. +datasources.section.destination-mysql.host.description=Hostname of the database. +datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-mysql.password.description=Password associated with the username. 
+datasources.section.destination-mysql.port.description=Port of the database. +datasources.section.destination-mysql.ssl.description=Encrypt data using SSL. +datasources.section.destination-mysql.username.description=Username to use to access the database. +datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file +datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.destination-oracle.encryption.title=Encryption +datasources.section.destination-oracle.host.title=Host +datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-oracle.password.title=Password +datasources.section.destination-oracle.port.title=Port +datasources.section.destination-oracle.schema.title=Default Schema +datasources.section.destination-oracle.sid.title=SID +datasources.section.destination-oracle.username.title=User +datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm. +datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.destination-oracle.host.description=The hostname of the database. +datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-oracle.password.description=The password associated with the username. +datasources.section.destination-oracle.port.description=The port of the database. +datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema. +datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer. +datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database. 
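The Oracle SSL PEM file mentioned above is just one or more base64-encoded certificates concatenated in a single text file; a sketch of its structure (certificate bodies elided):

```
-----BEGIN CERTIFICATE-----
MIID...base64-encoded server or CA certificate...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIID...optional intermediate certificate...
-----END CERTIFICATE-----
```
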
+datasources.section.destination-postgres.database.title=DB Name +datasources.section.destination-postgres.host.title=Host +datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-postgres.password.title=Password +datasources.section.destination-postgres.port.title=Port +datasources.section.destination-postgres.schema.title=Default Schema +datasources.section.destination-postgres.ssl.title=SSL Connection +datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.destination-postgres.ssl_mode.title=SSL modes +datasources.section.destination-postgres.username.title=User +datasources.section.destination-postgres.database.description=Name of the database. +datasources.section.destination-postgres.host.description=Hostname of the database. +datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-postgres.password.description=Password associated with the username. +datasources.section.destination-postgres.port.description=Port of the database. +datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public". +datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.destination-postgres.ssl_mode.description=SSL connection modes. +datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. 
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.destination-postgres.username.description=Username to use to access the database. +datasources.section.destination-pubsub.credentials_json.title=Credentials JSON +datasources.section.destination-pubsub.project_id.title=Project ID +datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID +datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. +datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub. +datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID. +datasources.section.destination-pulsar.batching_enabled.title=Enable batching +datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages +datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay +datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full +datasources.section.destination-pulsar.brokers.title=Pulsar brokers +datasources.section.destination-pulsar.compression_type.title=Compression type +datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages +datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions +datasources.section.destination-pulsar.producer_name.title=Producer name +datasources.section.destination-pulsar.producer_sync.title=Sync producer +datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout +datasources.section.destination-pulsar.topic_namespace.title=Topic namespace +datasources.section.destination-pulsar.topic_pattern.title=Topic pattern +datasources.section.destination-pulsar.topic_tenant.title=Topic tenant +datasources.section.destination-pulsar.topic_test.title=Test topic +datasources.section.destination-pulsar.topic_type.title=Topic type +datasources.section.destination-pulsar.use_tls.title=Use TLS +datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer. +datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch. +datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched. +datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full. +datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster. 
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer. +datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages. +datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions. +datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name. +datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar. +datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by the server before the send timeout (in ms) expires, an error occurs. +datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces. +datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention. +datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and can be spread across clusters. +datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages. +datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk. +datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection. +datasources.section.destination-rabbitmq.exchange.description=The exchange name. +datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name. +datasources.section.destination-rabbitmq.password.description=The password to connect. +datasources.section.destination-rabbitmq.port.description=The RabbitMQ port. +datasources.section.destination-rabbitmq.routing_key.description=The routing key. +datasources.section.destination-rabbitmq.ssl.description=SSL enabled. +datasources.section.destination-rabbitmq.username.description=The username to connect. +datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name. +datasources.section.destination-redis.cache_type.title=Cache type +datasources.section.destination-redis.host.title=Host +datasources.section.destination-redis.password.title=Password +datasources.section.destination-redis.port.title=Port +datasources.section.destination-redis.username.title=Username +datasources.section.destination-redis.cache_type.description=Redis cache type to store data in. +datasources.section.destination-redis.host.description=Redis host to connect to. +datasources.section.destination-redis.password.description=Password associated with Redis. +datasources.section.destination-redis.port.description=Port of Redis. 
+datasources.section.destination-redis.username.description=Username associated with Redis. +datasources.section.destination-redshift.database.title=Database +datasources.section.destination-redshift.host.title=Host +datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-redshift.password.title=Password +datasources.section.destination-redshift.port.title=Port +datasources.section.destination-redshift.schema.title=Default Schema +datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key +datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging +datasources.section.destination-redshift.uploading_method.title=Uploading Method +datasources.section.destination-redshift.username.title=Username +datasources.section.destination-redshift.database.description=Name of the database. +datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com) +datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-redshift.password.description=Password associated with the username. +datasources.section.destination-redshift.port.description=Port of the database. +datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public". +datasources.section.destination-redshift.uploading_method.description=The method how the data will be uploaded to the database. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key. 
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details. +datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key. +datasources.section.destination-redshift.username.description=Username to use to access the database. +datasources.section.destination-rockset.api_key.title=Api Key +datasources.section.destination-rockset.api_server.title=Api Server +datasources.section.destination-rockset.workspace.title=Workspace +datasources.section.destination-rockset.api_key.description=Rockset api key +datasources.section.destination-rockset.api_server.description=Rockset api URL +datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created + written to. 
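For the Redshift staging `key_encrypting_key` above (a base64-encoded 128-, 192-, or 256-bit key), a standard way to generate a suitable value is with openssl; this is a generic sketch, not an Airbyte-specific tool:

```
# 32 random bytes = 256 bits, base64-encoded (use 16 or 24 bytes for 128/192 bits)
openssl rand -base64 32
```
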
+datasources.section.destination-s3.access_key_id.title=S3 Key ID * +datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2 +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy +datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec * +datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening) +datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression +datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP +datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression +datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON +datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type * +datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional) +datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional) +datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage 
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. 
This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint URL. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-sftp-json.destination_path.title=Destination path
+datasources.section.destination-sftp-json.host.title=Host
+datasources.section.destination-sftp-json.password.title=Password
+datasources.section.destination-sftp-json.port.title=Port
+datasources.section.destination-sftp-json.username.title=User
+datasources.section.destination-sftp-json.destination_path.description=Path to the directory where JSON files will be written.
+datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server.
+datasources.section.destination-sftp-json.password.description=Password associated with the username.
+datasources.section.destination-sftp-json.port.description=Port of the SFTP server.
+datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server. 
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key +datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional) +datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication +datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password +datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password +datasources.section.destination-snowflake.credentials.title=Authorization Method +datasources.section.destination-snowflake.database.title=Database +datasources.section.destination-snowflake.host.title=Host +datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging
+datasources.section.destination-snowflake.loading_method.title=Data Staging Method
+datasources.section.destination-snowflake.role.title=Role
+datasources.section.destination-snowflake.schema.title=Default Schema
+datasources.section.destination-snowflake.username.title=Username
+datasources.section.destination-snowflake.warehouse.title=Warehouse
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username.
+datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into
+datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format ...snowflakecomputing.com)
+datasources.section.destination-snowflake.jdbc_url_params.description=Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &). 
Example: key1=value1&key2=value2&key3=value3 +datasources.section.destination-snowflake.loading_method.description=Select a data staging method +datasources.section.destination-snowflake.loading_method.oneOf.0.description=Select another option +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.description=Enter your AWS access key ID. Airbyte requires Read and Write permissions on your S3 bucket +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.description=Choose a data encryption method for the staging data +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync. +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s) +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.description=Toggle to delete staging files from the S3 bucket after a successful sync +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.description=Enter your S3 bucket name +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.description=Enter the region where your S3 bucket resides +datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.description=Enter your AWS secret access key +datasources.section.destination-snowflake.loading_method.oneOf.3.description=Recommended for large production workloads for better speed and scalability. 
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.description=Enter the Cloud Storage bucket name +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.description=Enter your Google Cloud service account key in the JSON format with read/write access to your Cloud Storage staging bucket +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.description=Enter the Google Cloud project ID +datasources.section.destination-snowflake.loading_method.oneOf.4.description=Recommended for large production workloads for better speed and scalability. +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.description=Enter your Azure Blob Storage account name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.description=Enter your Azure Blob Storage container name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.description=Enter the Azure Blob Storage endpoint domain name +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.description=Enter the Shared access signature (SAS) token to grant Snowflake limited access to objects in your Azure Blob Storage account +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.description= +datasources.section.destination-snowflake.role.description=Enter the role that you want to use to access Snowflake +datasources.section.destination-snowflake.schema.description=Enter the name of the default schema +datasources.section.destination-snowflake.username.description=Enter the name of the user you want to use to access the database +datasources.section.destination-snowflake.warehouse.description=Enter the name of the warehouse that you want to sync data into +datasources.section.destination-sqlite.destination_path.description=Path to the sqlite.db file. The file will be placed inside that local mount. For more information check out our docs +datasources.section.destination-tidb.database.title=Database +datasources.section.destination-tidb.host.title=Host +datasources.section.destination-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.destination-tidb.password.title=Password +datasources.section.destination-tidb.port.title=Port +datasources.section.destination-tidb.ssl.title=SSL Connection +datasources.section.destination-tidb.username.title=User +datasources.section.destination-tidb.database.description=Name of the database. +datasources.section.destination-tidb.host.description=Hostname of the database. +datasources.section.destination-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.destination-tidb.password.description=Password associated with the username. +datasources.section.destination-tidb.port.description=Port of the database. 
+datasources.section.destination-tidb.ssl.description=Encrypt data using SSL. +datasources.section.destination-tidb.username.description=Username to use to access the database. +datasources.section.source-airtable.api_key.title=API Key +datasources.section.source-airtable.base_id.title=Base ID +datasources.section.source-airtable.tables.title=Tables +datasources.section.source-airtable.api_key.description=The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key. +datasources.section.source-airtable.base_id.description=The Base ID to integrate the data from. You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs. +datasources.section.source-airtable.tables.description=The list of Tables to integrate. +datasources.section.source-amazon-ads.auth_type.title=Auth Type +datasources.section.source-amazon-ads.client_id.title=Client ID +datasources.section.source-amazon-ads.client_secret.title=Client Secret +datasources.section.source-amazon-ads.profiles.title=Profile IDs (Optional) +datasources.section.source-amazon-ads.refresh_token.title=Refresh Token +datasources.section.source-amazon-ads.region.title=Region * +datasources.section.source-amazon-ads.report_generation_max_retries.title=Report Generation Maximum Retries * +datasources.section.source-amazon-ads.report_wait_timeout.title=Report Wait Timeout * +datasources.section.source-amazon-ads.start_date.title=Start Date (Optional) +datasources.section.source-amazon-ads.client_id.description=The client ID of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.client_secret.description=The client secret of your Amazon Ads developer application. See the docs for more information. +datasources.section.source-amazon-ads.profiles.description=Profile IDs you want to fetch data for. See docs for more details. +datasources.section.source-amazon-ads.refresh_token.description=Amazon Ads refresh token. See the docs for more information on how to obtain this token. +datasources.section.source-amazon-ads.region.description=Region to pull data from (EU/NA/FE). See docs for more details. +datasources.section.source-amazon-ads.report_generation_max_retries.description=Maximum retries Airbyte will attempt for fetching report data. Default is 5. +datasources.section.source-amazon-ads.report_wait_timeout.description=Timeout duration in minutes for Reports. Default is 30 minutes. +datasources.section.source-amazon-ads.start_date.description=The Start date for collecting reports, should not be more than 60 days in the past. 
In YYYY-MM-DD format
+datasources.section.source-amazon-seller-partner.app_id.title=App ID *
+datasources.section.source-amazon-seller-partner.auth_type.title=Auth Type
+datasources.section.source-amazon-seller-partner.aws_access_key.title=AWS Access Key
+datasources.section.source-amazon-seller-partner.aws_environment.title=AWS Environment
+datasources.section.source-amazon-seller-partner.aws_secret_key.title=AWS Secret Access Key
+datasources.section.source-amazon-seller-partner.lwa_app_id.title=LWA Client ID
+datasources.section.source-amazon-seller-partner.lwa_client_secret.title=LWA Client Secret
+datasources.section.source-amazon-seller-partner.max_wait_seconds.title=Max wait time for reports (in seconds)
+datasources.section.source-amazon-seller-partner.period_in_days.title=Period In Days
+datasources.section.source-amazon-seller-partner.refresh_token.title=Refresh Token
+datasources.section.source-amazon-seller-partner.region.title=AWS Region
+datasources.section.source-amazon-seller-partner.replication_end_date.title=End Date
+datasources.section.source-amazon-seller-partner.replication_start_date.title=Start Date
+datasources.section.source-amazon-seller-partner.report_options.title=Report Options
+datasources.section.source-amazon-seller-partner.role_arn.title=Role ARN
+datasources.section.source-amazon-seller-partner.app_id.description=Your Amazon App ID
+datasources.section.source-amazon-seller-partner.aws_access_key.description=Specifies the AWS access key used as part of the credentials to authenticate the user.
+datasources.section.source-amazon-seller-partner.aws_environment.description=An enumeration.
+datasources.section.source-amazon-seller-partner.aws_secret_key.description=Specifies the AWS secret key used as part of the credentials to authenticate the user.
+datasources.section.source-amazon-seller-partner.lwa_app_id.description=Your Login with Amazon Client ID.
+datasources.section.source-amazon-seller-partner.lwa_client_secret.description=Your Login with Amazon Client Secret.
+datasources.section.source-amazon-seller-partner.max_wait_seconds.description=Sometimes a report can take up to 30 minutes to generate. This will set the limit for how long to wait for a successful report.
+datasources.section.source-amazon-seller-partner.period_in_days.description=Will be used for stream slicing for initial full_refresh sync when no updated state is present for reports that support sliced incremental sync.
+datasources.section.source-amazon-seller-partner.refresh_token.description=The Refresh Token obtained via OAuth flow authorization.
+datasources.section.source-amazon-seller-partner.region.description=An enumeration.
+datasources.section.source-amazon-seller-partner.replication_end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated.
+datasources.section.source-amazon-seller-partner.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-amazon-seller-partner.report_options.description=Additional information passed to reports. This varies by report type. Must be a valid JSON string.
+datasources.section.source-amazon-seller-partner.role_arn.description=Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations requested using this profile. (Needs permission to 'Assume Role' STS). 
+datasources.section.source-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.source-amazon-sqs.attributes_to_return.title=Message Attributes To Return
+datasources.section.source-amazon-sqs.delete_messages.title=Delete Messages After Read
+datasources.section.source-amazon-sqs.max_batch_size.title=Max Batch Size
+datasources.section.source-amazon-sqs.max_wait_time.title=Max Wait Time
+datasources.section.source-amazon-sqs.queue_url.title=Queue URL
+datasources.section.source-amazon-sqs.region.title=AWS Region
+datasources.section.source-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.source-amazon-sqs.visibility_timeout.title=Message Visibility Timeout
+datasources.section.source-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for pulling messages
+datasources.section.source-amazon-sqs.attributes_to_return.description=Comma-separated list of Message Attribute names to return
+datasources.section.source-amazon-sqs.delete_messages.description=If Enabled, messages will be deleted from the SQS Queue after being read. If Disabled, messages are left in the queue and can be read more than once. WARNING: Enabling this option can result in data loss in cases of failure, use with caution, see documentation for more detail.
+datasources.section.source-amazon-sqs.max_batch_size.description=Max amount of messages to get in one batch (10 max)
+datasources.section.source-amazon-sqs.max_wait_time.description=Max amount of time in seconds to wait for messages in a single poll (20 max)
+datasources.section.source-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.source-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.source-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for pulling messages
+datasources.section.source-amazon-sqs.visibility_timeout.description=Modify the Visibility Timeout of the individual message from the Queue's default (seconds).
+datasources.section.source-amplitude.api_key.title=API Key
+datasources.section.source-amplitude.secret_key.title=Secret Key
+datasources.section.source-amplitude.start_date.title=Replication Start Date
+datasources.section.source-amplitude.api_key.description=Amplitude API Key. See the setup guide for more information on how to obtain this key.
+datasources.section.source-amplitude.secret_key.description=Amplitude Secret Key. See the setup guide for more information on how to obtain this key.
+datasources.section.source-amplitude.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-apify-dataset.clean.title=Clean
+datasources.section.source-apify-dataset.datasetId.title=Dataset ID
+datasources.section.source-apify-dataset.clean.description=If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false.
+datasources.section.source-apify-dataset.datasetId.description=ID of the dataset you would like to load to Airbyte.
+datasources.section.source-appsflyer.api_token.description=Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard.
+datasources.section.source-appsflyer.app_id.description=App identifier as found in AppsFlyer. 
+datasources.section.source-appsflyer.start_date.description=The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days.
+datasources.section.source-appsflyer.timezone.description=Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console.
+datasources.section.source-appstore-singer.issuer_id.title=Issuer ID
+datasources.section.source-appstore-singer.key_id.title=Key ID
+datasources.section.source-appstore-singer.private_key.title=Private Key
+datasources.section.source-appstore-singer.start_date.title=Start Date
+datasources.section.source-appstore-singer.vendor.title=Vendor ID
+datasources.section.source-appstore-singer.issuer_id.description=Appstore Issuer ID. See the docs for more information on how to obtain this ID.
+datasources.section.source-appstore-singer.key_id.description=Appstore Key ID. See the docs for more information on how to obtain this key.
+datasources.section.source-appstore-singer.private_key.description=Appstore Private Key. See the docs for more information on how to obtain this key.
+datasources.section.source-appstore-singer.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-appstore-singer.vendor.description=Appstore Vendor ID. See the docs for more information on how to obtain this ID.
+datasources.section.source-asana.credentials.oneOf.0.properties.option_title.title=Credentials title
+datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.title=Personal Access Token
+datasources.section.source-asana.credentials.oneOf.0.title=Authenticate with Personal Access Token
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title=
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.title=
+datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title=
+datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.title=
+datasources.section.source-asana.credentials.oneOf.1.properties.option_title.title=Credentials title
+datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title=
+datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.title=
+datasources.section.source-asana.credentials.oneOf.1.title=Authenticate via Asana (OAuth)
+datasources.section.source-asana.credentials.title=Authentication mechanism
+datasources.section.source-asana.credentials.description=Choose how to authenticate to Asana
+datasources.section.source-asana.credentials.oneOf.0.properties.option_title.description=PAT Credentials
+datasources.section.source-asana.credentials.oneOf.0.properties.personal_access_token.description=Asana Personal Access Token (generate yours here). 
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description=
+datasources.section.source-asana.credentials.oneOf.1.properties.client_id.description=
+datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description=
+datasources.section.source-asana.credentials.oneOf.1.properties.client_secret.description=
+datasources.section.source-asana.credentials.oneOf.1.properties.option_title.description=OAuth Credentials
+datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description=
+datasources.section.source-asana.credentials.oneOf.1.properties.refresh_token.description=
+datasources.section.source-aws-cloudtrail.aws_key_id.title=Key ID
+datasources.section.source-aws-cloudtrail.aws_region_name.title=Region Name
+datasources.section.source-aws-cloudtrail.aws_secret_key.title=Secret Key
+datasources.section.source-aws-cloudtrail.start_date.title=Start Date
+datasources.section.source-aws-cloudtrail.aws_key_id.description=AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key.
+datasources.section.source-aws-cloudtrail.aws_region_name.description=The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name.
+datasources.section.source-aws-cloudtrail.aws_secret_key.description=AWS CloudTrail Secret Key. See the docs for more information on how to obtain this key.
+datasources.section.source-aws-cloudtrail.start_date.description=The date from which you would like to replicate data. Data in AWS CloudTrail is available for the last 90 days only. Format: YYYY-MM-DD.
+datasources.section.source-azure-table.storage_access_key.title=Access Key
+datasources.section.source-azure-table.storage_account_name.title=Account Name
+datasources.section.source-azure-table.storage_endpoint_suffix.title=Endpoint Suffix
+datasources.section.source-azure-table.storage_access_key.description=Azure Table Storage Access Key. See the docs for more information on how to obtain this key.
+datasources.section.source-azure-table.storage_account_name.description=The name of your storage account.
+datasources.section.source-azure-table.storage_endpoint_suffix.description=Azure Table Storage service account URL suffix. See the docs for more information on how to obtain the endpoint suffix.
+datasources.section.source-bamboo-hr.api_key.description=API key of BambooHR
+datasources.section.source-bamboo-hr.custom_reports_fields.description=Comma-separated list of fields to include in custom reports.
+datasources.section.source-bamboo-hr.custom_reports_include_default_fields.description=If true, the custom reports endpoint will include the default fields defined here: https://documentation.bamboohr.com/docs/list-of-field-names.
+datasources.section.source-bamboo-hr.subdomain.description=Subdomain of BambooHR
+datasources.section.source-bigcommerce.access_token.title=Access Token
+datasources.section.source-bigcommerce.start_date.title=Start Date
+datasources.section.source-bigcommerce.store_hash.title=Store Hash
+datasources.section.source-bigcommerce.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-bigcommerce.start_date.description=The date from which you would like to replicate data. Format: YYYY-MM-DD.
+datasources.section.source-bigcommerce.store_hash.description=The hash code of the store. For https://api.bigcommerce.com/stores/HASH_CODE/v3/, the store's hash code is 'HASH_CODE'. 
+datasources.section.source-bigquery.credentials_json.title=Credentials JSON +datasources.section.source-bigquery.dataset_id.title=Default Dataset ID +datasources.section.source-bigquery.project_id.title=Project ID +datasources.section.source-bigquery.credentials_json.description=The contents of your Service Account Key JSON file. See the docs for more information on how to obtain this key. +datasources.section.source-bigquery.dataset_id.description=The dataset ID to search for tables and views. If you are only loading data from one dataset, setting this option could result in much faster schema discovery. +datasources.section.source-bigquery.project_id.description=The GCP project ID for the project containing the target BigQuery dataset. +datasources.section.source-bing-ads.client_id.title=Client ID +datasources.section.source-bing-ads.client_secret.title=Client Secret +datasources.section.source-bing-ads.developer_token.title=Developer Token +datasources.section.source-bing-ads.refresh_token.title=Refresh Token +datasources.section.source-bing-ads.reports_start_date.title=Reports replication start date +datasources.section.source-bing-ads.tenant_id.title=Tenant ID +datasources.section.source-bing-ads.client_id.description=The Client ID of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.client_secret.description=The Client Secret of your Microsoft Advertising developer application. +datasources.section.source-bing-ads.developer_token.description=Developer token associated with user. See more info in the docs. +datasources.section.source-bing-ads.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-bing-ads.reports_start_date.description=The start date from which to begin replicating report data. Any data generated before this date will not be replicated in reports. This is a UTC date in YYYY-MM-DD format. +datasources.section.source-bing-ads.tenant_id.description=The Tenant ID of your Microsoft Advertising developer application. Set this to "common" unless you know you need a different value. +datasources.section.source-braintree.environment.title=Environment +datasources.section.source-braintree.merchant_id.title=Merchant ID +datasources.section.source-braintree.private_key.title=Private Key +datasources.section.source-braintree.public_key.title=Public Key +datasources.section.source-braintree.start_date.title=Start Date +datasources.section.source-braintree.environment.description=Environment specifies where the data will come from. +datasources.section.source-braintree.merchant_id.description=The unique identifier for your entire gateway account. See the docs for more information on how to obtain this ID. +datasources.section.source-braintree.private_key.description=Braintree Private Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.public_key.description=Braintree Public Key. See the docs for more information on how to obtain this key. +datasources.section.source-braintree.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-cart.access_token.title=Access Token +datasources.section.source-cart.start_date.title=Start Date +datasources.section.source-cart.store_name.title=Store Name +datasources.section.source-cart.access_token.description=Access Token for making authenticated requests. 
+datasources.section.source-cart.start_date.description=The date from which you'd like to replicate the data +datasources.section.source-cart.store_name.description=The name of Cart.com Online Store. All API URLs start with https://[mystorename.com]/api/v1/, where [mystorename.com] is the domain name of your store. +datasources.section.source-chargebee.product_catalog.title=Product Catalog +datasources.section.source-chargebee.site.title=Site +datasources.section.source-chargebee.site_api_key.title=API Key +datasources.section.source-chargebee.start_date.title=Start Date +datasources.section.source-chargebee.product_catalog.description=Product Catalog version of your Chargebee site. Instructions on how to find your version you may find here under `API Version` section. +datasources.section.source-chargebee.site.description=The site prefix for your Chargebee instance. +datasources.section.source-chargebee.site_api_key.description=Chargebee API Key. See the docs for more information on how to obtain this key. +datasources.section.source-chargebee.start_date.description=UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-chargify.api_key.description=Chargify API Key. +datasources.section.source-chargify.domain.description=Chargify domain. Normally this domain follows the following format companyname.chargify.com +datasources.section.source-chartmogul.api_key.description=Chartmogul API key +datasources.section.source-chartmogul.interval.description=Some APIs such as Metrics require intervals to cluster data. +datasources.section.source-chartmogul.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. When feasible, any data before this date will not be replicated. +datasources.section.source-clickhouse.database.title=Database +datasources.section.source-clickhouse.host.title=Host +datasources.section.source-clickhouse.password.title=Password +datasources.section.source-clickhouse.port.title=Port +datasources.section.source-clickhouse.ssl.title=SSL Connection +datasources.section.source-clickhouse.username.title=Username +datasources.section.source-clickhouse.database.description=The name of the database. +datasources.section.source-clickhouse.host.description=The host endpoint of the Clickhouse cluster. +datasources.section.source-clickhouse.password.description=The password associated with this username. +datasources.section.source-clickhouse.port.description=The port of the database. +datasources.section.source-clickhouse.ssl.description=Encrypt data using SSL. +datasources.section.source-clickhouse.username.description=The username which is used to access the database. +datasources.section.source-close-com.api_key.description=Close.com API key (usually starts with 'api_'; find yours here). +datasources.section.source-close-com.start_date.description=The start date to sync data. Leave blank for full sync. Format: YYYY-MM-DD. +datasources.section.source-cockroachdb.database.title=DB Name +datasources.section.source-cockroachdb.host.title=Host +datasources.section.source-cockroachdb.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-cockroachdb.password.title=Password +datasources.section.source-cockroachdb.port.title=Port +datasources.section.source-cockroachdb.ssl.title=Connect using SSL +datasources.section.source-cockroachdb.username.title=User +datasources.section.source-cockroachdb.database.description=Name of the database. 
+datasources.section.source-cockroachdb.host.description=Hostname of the database.
+datasources.section.source-cockroachdb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.
+datasources.section.source-cockroachdb.password.description=Password associated with the username.
+datasources.section.source-cockroachdb.port.description=Port of the database.
+datasources.section.source-cockroachdb.ssl.description=Encrypt client/server communications for increased security.
+datasources.section.source-cockroachdb.username.description=Username to use to access the database.
+datasources.section.source-commercetools.client_id.description=ID of the API Client.
+datasources.section.source-commercetools.client_secret.description=The password or secret of the API Client.
+datasources.section.source-commercetools.host.description=The cloud provider where your shop is hosted. See: https://docs.commercetools.com/api/authorization
+datasources.section.source-commercetools.project_key.description=The project key.
+datasources.section.source-commercetools.region.description=The region of the platform.
+datasources.section.source-commercetools.start_date.description=The date from which you would like to replicate data. Format: YYYY-MM-DD.
+datasources.section.source-confluence.api_token.description=Please follow the Atlassian documentation for generating an API token: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/
+datasources.section.source-confluence.domain_name.description=Your Confluence domain name
+datasources.section.source-confluence.email.description=Your Confluence login email
+datasources.section.source-db2.encryption.oneOf.0.title=Unencrypted
+datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.title=Key Store Password. This field is optional. If you do not fill in this field, the password will be randomly generated.
+datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.title=SSL PEM file
+datasources.section.source-db2.encryption.oneOf.1.title=TLS Encrypted (verify certificate)
+datasources.section.source-db2.encryption.title=Encryption
+datasources.section.source-db2.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-db2.db.description=Name of the database.
+datasources.section.source-db2.encryption.description=Encryption method to use when communicating with the database
+datasources.section.source-db2.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.source-db2.encryption.oneOf.1.description=Verify and use the cert provided by the server.
+datasources.section.source-db2.encryption.oneOf.1.properties.key_store_password.description=Key Store Password
+datasources.section.source-db2.encryption.oneOf.1.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations
+datasources.section.source-db2.host.description=Host of the Db2 database.
+datasources.section.source-db2.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-db2.password.description=Password associated with the username. 
+datasources.section.source-db2.port.description=Port of the database. +datasources.section.source-db2.username.description=Username to use to access the database. +datasources.section.source-delighted.api_key.title=Delighted API Key +datasources.section.source-delighted.since.title=Since +datasources.section.source-delighted.api_key.description=A Delighted API key. +datasources.section.source-delighted.since.description=The date from which you'd like to replicate the data +datasources.section.source-dixa.api_token.description=Dixa API token +datasources.section.source-dixa.batch_size.description=Number of days to batch into one request. Max 31. +datasources.section.source-dixa.start_date.description=The connector pulls records updated from this date onwards. +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-drift.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-drift.credentials.oneOf.1.title=Access Token +datasources.section.source-drift.credentials.title=Authorization Method +datasources.section.source-drift.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-drift.credentials.oneOf.0.properties.client_id.description=The Client ID of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Drift developer application. +datasources.section.source-drift.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-drift.credentials.oneOf.1.properties.access_token.description=Drift Access Token. See the docs for more information on how to generate this key. 
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.title=None
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.title=API Key ID
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.title=API Key Secret
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.title=API Key/Secret
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.title=Password
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.title=Username
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.title=Username/Password
+datasources.section.source-elasticsearch.authenticationMethod.title=Authentication Method
+datasources.section.source-elasticsearch.endpoint.title=Server Endpoint
+datasources.section.source-elasticsearch.authenticationMethod.description=The type of authentication to be used
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.0.description=No authentication will be used
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.description=Use an API key and secret combination to authenticate
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeyId.description=The Key ID to use when accessing an enterprise Elasticsearch instance.
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.1.properties.apiKeySecret.description=The secret associated with the API Key ID.
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.description=Basic auth header with a username and password
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.password.description=Basic auth password to access a secure Elasticsearch server
+datasources.section.source-elasticsearch.authenticationMethod.oneOf.2.properties.username.description=Basic auth username to access a secure Elasticsearch server
+datasources.section.source-elasticsearch.endpoint.description=The full URL of the Elasticsearch server
+datasources.section.source-facebook-marketing.access_token.title=Access Token
+datasources.section.source-facebook-marketing.account_id.title=Account ID
+datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.title=ValidActionBreakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.title=Action Breakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.title=ValidBreakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.title=Breakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.title=End Date
+datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.title=ValidEnums
+datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.title=Fields
+datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.title=Custom Insights Lookback Window
+datasources.section.source-facebook-marketing.custom_insights.items.properties.name.title=Name
+datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.title=Start Date
+datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.title=Time Increment 
+datasources.section.source-facebook-marketing.custom_insights.items.title=InsightConfig
+datasources.section.source-facebook-marketing.custom_insights.title=Custom Insights
+datasources.section.source-facebook-marketing.end_date.title=End Date
+datasources.section.source-facebook-marketing.fetch_thumbnail_images.title=Fetch Thumbnail Images
+datasources.section.source-facebook-marketing.include_deleted.title=Include Deleted
+datasources.section.source-facebook-marketing.insights_lookback_window.title=Insights Lookback Window
+datasources.section.source-facebook-marketing.max_batch_size.title=Maximum size of Batched Requests
+datasources.section.source-facebook-marketing.page_size.title=Page Size of Requests
+datasources.section.source-facebook-marketing.start_date.title=Start Date
+datasources.section.source-facebook-marketing.access_token.description=The value of the access token generated. See the docs for more information
+datasources.section.source-facebook-marketing.account_id.description=The Facebook Ad account ID to use when pulling data from the Facebook Marketing API.
+datasources.section.source-facebook-marketing.custom_insights.description=A list which contains insights entries; each entry must have a name and can contain fields, breakdowns or action_breakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.description=Config for custom insights
+datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.description=A list of chosen action_breakdowns for action_breakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.action_breakdowns.items.description=Generic enumeration.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.description=A list of chosen breakdowns for breakdowns
+datasources.section.source-facebook-marketing.custom_insights.items.properties.breakdowns.items.description=Generic enumeration.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.end_date.description=The date until which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. All data generated between the start date and this date will be replicated. Not setting this option will result in always syncing the latest data.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.description=A list of chosen fields for fields parameter
+datasources.section.source-facebook-marketing.custom_insights.items.properties.fields.items.description=Generic enumeration.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.insights_lookback_window.description=The attribution window
+datasources.section.source-facebook-marketing.custom_insights.items.properties.name.description=The name value of insight
+datasources.section.source-facebook-marketing.custom_insights.items.properties.start_date.description=The date from which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z.
+datasources.section.source-facebook-marketing.custom_insights.items.properties.time_increment.description=Time window in days by which to aggregate statistics. The sync will be chunked into N day intervals, where N is the number of days you specified. For example, if you set this value to 7, then all statistics will be reported as 7-day aggregates by starting from the start_date. 
If the start and end dates are October 1st and October 30th, then the connector will output 5 records: 01 - 06, 07 - 13, 14 - 20, 21 - 27, and 28 - 30 (3 days only). +datasources.section.source-facebook-marketing.end_date.description=The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the latest data. +datasources.section.source-facebook-marketing.fetch_thumbnail_images.description=In each Ad Creative, fetch the thumbnail_url and store the result in thumbnail_data_url +datasources.section.source-facebook-marketing.include_deleted.description=Include data from deleted Campaigns, Ads, and AdSets +datasources.section.source-facebook-marketing.insights_lookback_window.description=The attribution window +datasources.section.source-facebook-marketing.max_batch_size.description=Maximum batch size used when sending batch requests to Facebook API. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.page_size.description=Page size used when sending requests to Facebook API to specify number of records per page when response has pagination. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases. +datasources.section.source-facebook-marketing.start_date.description=The date from which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-facebook-pages.access_token.title=Page Access Token +datasources.section.source-facebook-pages.page_id.title=Page ID +datasources.section.source-facebook-pages.access_token.description=Facebook Page Access Token +datasources.section.source-facebook-pages.page_id.description=Page ID +datasources.section.source-faker.count.title=Count +datasources.section.source-faker.records_per_slice.title=Records Per Stream Slice +datasources.section.source-faker.records_per_sync.title=Records Per Sync +datasources.section.source-faker.seed.title=Seed +datasources.section.source-faker.count.description=How many users should be generated in total. This setting does not apply to the purchases or products stream. +datasources.section.source-faker.records_per_slice.description=How many fake records will be in each page (stream slice), before a state message is emitted? +datasources.section.source-faker.records_per_sync.description=How many fake records will be returned for each sync, for each stream? By default, it will take 2 syncs to create the requested 1000 records. 
+datasources.section.source-faker.seed.description=Manually control the faker random seed to return the same values on subsequent runs (leave -1 for random) +datasources.section.source-file.dataset_name.title=Dataset Name +datasources.section.source-file.format.title=File Format +datasources.section.source-file.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.4.properties.user.title=User +datasources.section.source-file.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.5.properties.user.title=User +datasources.section.source-file.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.6.properties.user.title=User +datasources.section.source-file.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file.provider.oneOf.7.properties.storage.title=Storage +datasources.section.source-file.provider.oneOf.7.title=Local Filesystem (limited) +datasources.section.source-file.provider.title=Storage Provider +datasources.section.source-file.reader_options.title=Reader Options +datasources.section.source-file.url.title=URL +datasources.section.source-file.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need service account JSON credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content into this field (expecting JSON format). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details.
+datasources.section.source-file.provider.oneOf.4.properties.host.description= +datasources.section.source-file.provider.oneOf.4.properties.password.description= +datasources.section.source-file.provider.oneOf.4.properties.port.description= +datasources.section.source-file.provider.oneOf.4.properties.user.description= +datasources.section.source-file.provider.oneOf.5.properties.host.description= +datasources.section.source-file.provider.oneOf.5.properties.password.description= +datasources.section.source-file.provider.oneOf.5.properties.port.description= +datasources.section.source-file.provider.oneOf.5.properties.user.description= +datasources.section.source-file.provider.oneOf.6.properties.host.description= +datasources.section.source-file.provider.oneOf.6.properties.password.description= +datasources.section.source-file.provider.oneOf.6.properties.port.description= +datasources.section.source-file.provider.oneOf.6.properties.user.description= +datasources.section.source-file.provider.oneOf.7.properties.storage.description=WARNING: Note that the local storage URL available for reading must start with the local mount "/local/" at the moment until we implement more advanced docker mounting options. +datasources.section.source-file.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior. +datasources.section.source-file.url.description=The URL path to access the file which should be replicated.
+datasources.section.source-file-secure.dataset_name.title=Dataset Name +datasources.section.source-file-secure.format.title=File Format +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.title=User-Agent +datasources.section.source-file-secure.provider.oneOf.0.title=HTTPS: Public Web +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.title=Service Account JSON +datasources.section.source-file-secure.provider.oneOf.1.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.1.title=GCS: Google Cloud Storage +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-file-secure.provider.oneOf.2.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.2.title=S3: Amazon Web Services +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.title=SAS Token +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.title=Shared Key +datasources.section.source-file-secure.provider.oneOf.3.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.title=Storage Account +datasources.section.source-file-secure.provider.oneOf.3.title=AzBlob: Azure Blob Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.4.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.4.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.4.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.4.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.4.title=SSH: Secure Shell +datasources.section.source-file-secure.provider.oneOf.5.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.5.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.5.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.5.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.5.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.5.title=SCP: Secure copy protocol +datasources.section.source-file-secure.provider.oneOf.6.properties.host.title=Host +datasources.section.source-file-secure.provider.oneOf.6.properties.password.title=Password +datasources.section.source-file-secure.provider.oneOf.6.properties.port.title=Port +datasources.section.source-file-secure.provider.oneOf.6.properties.storage.title=Storage +datasources.section.source-file-secure.provider.oneOf.6.properties.user.title=User +datasources.section.source-file-secure.provider.oneOf.6.title=SFTP: Secure File Transfer Protocol +datasources.section.source-file-secure.provider.title=Storage Provider +datasources.section.source-file-secure.reader_options.title=Reader Options +datasources.section.source-file-secure.url.title=URL +datasources.section.source-file-secure.dataset_name.description=The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only). 
+datasources.section.source-file-secure.format.description=The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs). +datasources.section.source-file-secure.provider.description=The storage Provider or Location of the file(s) which should be replicated. +datasources.section.source-file-secure.provider.oneOf.0.properties.user_agent.description=Add User-Agent to request +datasources.section.source-file-secure.provider.oneOf.1.properties.service_account_json.description=In order to access private Buckets stored on Google Cloud, this connector would need service account JSON credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content into this field (expecting JSON format). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.2.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.sas_token.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.shared_key.description=To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary. +datasources.section.source-file-secure.provider.oneOf.3.properties.storage_account.description=The globally unique name of the storage account that the desired blob sits within. See here for more details.
+datasources.section.source-file-secure.provider.oneOf.4.properties.host.description= +datasources.section.source-file-secure.provider.oneOf.4.properties.password.description= +datasources.section.source-file-secure.provider.oneOf.4.properties.port.description= +datasources.section.source-file-secure.provider.oneOf.4.properties.user.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.host.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.password.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.port.description= +datasources.section.source-file-secure.provider.oneOf.5.properties.user.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.host.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.password.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.port.description= +datasources.section.source-file-secure.provider.oneOf.6.properties.user.description= +datasources.section.source-file-secure.reader_options.description=This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior. +datasources.section.source-file-secure.url.description=The URL path to access the file which should be replicated. +datasources.section.source-firebolt.account.title=Account +datasources.section.source-firebolt.database.title=Database +datasources.section.source-firebolt.engine.title=Engine +datasources.section.source-firebolt.host.title=Host +datasources.section.source-firebolt.password.title=Password +datasources.section.source-firebolt.username.title=Username +datasources.section.source-firebolt.account.description=Firebolt account to log in to. +datasources.section.source-firebolt.database.description=The database to connect to. +datasources.section.source-firebolt.engine.description=Engine name or URL to connect to. +datasources.section.source-firebolt.host.description=The host name of your Firebolt database. +datasources.section.source-firebolt.password.description=Firebolt password. +datasources.section.source-firebolt.username.description=Firebolt email address you use to log in.
+datasources.section.source-flexport.api_key.title=API Key +datasources.section.source-flexport.start_date.title=Start Date +datasources.section.source-freshcaller.api_key.title=API Key +datasources.section.source-freshcaller.domain.title=Domain for Freshcaller account +datasources.section.source-freshcaller.requests_per_minute.title=Requests per minute +datasources.section.source-freshcaller.start_date.title=Start Date +datasources.section.source-freshcaller.sync_lag_minutes.title=Lag in minutes for each sync +datasources.section.source-freshcaller.api_key.description=Freshcaller API Key. See the docs for more information on how to obtain this key. +datasources.section.source-freshcaller.domain.description=Used to construct the base URL for the Freshcaller APIs +datasources.section.source-freshcaller.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account. +datasources.section.source-freshcaller.start_date.description=UTC date and time. Any data created after this date will be replicated. +datasources.section.source-freshcaller.sync_lag_minutes.description=Lag in minutes for each sync, i.e., at time T, data for the time range [prev_sync_time, T-30] will be fetched +datasources.section.source-freshdesk.api_key.title=API Key +datasources.section.source-freshdesk.domain.title=Domain +datasources.section.source-freshdesk.requests_per_minute.title=Requests per minute +datasources.section.source-freshdesk.start_date.title=Start Date +datasources.section.source-freshdesk.api_key.description=Freshdesk API Key. See the docs for more information on how to obtain this key. +datasources.section.source-freshdesk.domain.description=Freshdesk domain +datasources.section.source-freshdesk.requests_per_minute.description=The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account. +datasources.section.source-freshdesk.start_date.description=UTC date and time. Any data created after this date will be replicated. If this parameter is not set, all data will be replicated. +datasources.section.source-freshsales.api_key.title=API Key +datasources.section.source-freshsales.domain_name.title=Domain Name +datasources.section.source-freshsales.api_key.description=Freshsales API Key. See here. The key is case sensitive. +datasources.section.source-freshsales.domain_name.description=The name of your Freshsales domain +datasources.section.source-freshservice.api_key.title=API Key +datasources.section.source-freshservice.domain_name.title=Domain Name +datasources.section.source-freshservice.start_date.title=Start Date +datasources.section.source-freshservice.api_key.description=Freshservice API Key. See here. The key is case sensitive. +datasources.section.source-freshservice.domain_name.description=The name of your Freshservice domain +datasources.section.source-freshservice.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-github.branch.title=Branch (Optional) +datasources.section.source-github.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-github.credentials.oneOf.0.title=OAuth +datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.title=Personal Access Tokens +datasources.section.source-github.credentials.oneOf.1.title=Personal Access Token +datasources.section.source-github.credentials.title=Authentication * +datasources.section.source-github.page_size_for_large_streams.title=Page size for large streams (Optional) +datasources.section.source-github.repository.title=GitHub Repositories +datasources.section.source-github.start_date.title=Start date +datasources.section.source-github.branch.description=Space-delimited list of GitHub repository branches to pull commits for, e.g. `airbytehq/airbyte/master`. If no branches are specified for a repository, the default branch will be pulled. +datasources.section.source-github.credentials.description=Choose how to authenticate to GitHub +datasources.section.source-github.credentials.oneOf.0.properties.access_token.description=OAuth access token +datasources.section.source-github.credentials.oneOf.1.properties.personal_access_token.description=Log into GitHub and then generate a personal access token. To load balance your API quota consumption across multiple API tokens, input multiple tokens separated with "," +datasources.section.source-github.page_size_for_large_streams.description=The GitHub connector contains several streams with a large amount of data. The page size of such streams depends on the size of your repository. We recommend that you specify values between 10 and 30. +datasources.section.source-github.repository.description=Space-delimited list of GitHub organizations/repositories, e.g. `airbytehq/airbyte` for a single repository, `airbytehq/*` to get all repositories from an organization and `airbytehq/airbyte airbytehq/another-repo` for multiple repositories. +datasources.section.source-github.start_date.description=The date from which you'd like to replicate data from GitHub in the format YYYY-MM-DDT00:00:00Z. For the streams which support this configuration, only data generated on or after the start date will be replicated. This field doesn't apply to all streams, see the docs for more info +datasources.section.source-gitlab.api_url.title=API URL +datasources.section.source-gitlab.groups.title=Groups +datasources.section.source-gitlab.private_token.title=Private Token +datasources.section.source-gitlab.projects.title=Projects +datasources.section.source-gitlab.start_date.title=Start Date +datasources.section.source-gitlab.api_url.description=Please enter the base URL of your GitLab instance. +datasources.section.source-gitlab.groups.description=Space-delimited list of groups. e.g. airbyte.io. +datasources.section.source-gitlab.private_token.description=Log into your GitLab account and then generate a personal Access Token. +datasources.section.source-gitlab.projects.description=Space-delimited list of projects. e.g. airbyte.io/documentation meltano/tap-gitlab. +datasources.section.source-gitlab.start_date.description=The date from which you'd like to replicate data for GitLab API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.
+datasources.section.source-google-ads.conversion_window_days.title=Conversion Window (Optional) +datasources.section.source-google-ads.credentials.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-ads.credentials.properties.client_id.title=Client ID +datasources.section.source-google-ads.credentials.properties.client_secret.title=Client Secret +datasources.section.source-google-ads.credentials.properties.developer_token.title=Developer Token +datasources.section.source-google-ads.credentials.properties.refresh_token.title=Refresh Token +datasources.section.source-google-ads.credentials.title=Google Credentials +datasources.section.source-google-ads.custom_queries.items.properties.query.title=Custom Query +datasources.section.source-google-ads.custom_queries.items.properties.table_name.title=Destination Table Name +datasources.section.source-google-ads.custom_queries.title=Custom GAQL Queries (Optional) +datasources.section.source-google-ads.customer_id.title=Customer ID(s) +datasources.section.source-google-ads.end_date.title=End Date (Optional) +datasources.section.source-google-ads.login_customer_id.title=Login Customer ID for Managed Accounts (Optional) +datasources.section.source-google-ads.start_date.title=Start Date +datasources.section.source-google-ads.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. For more information, see Google's documentation. +datasources.section.source-google-ads.credentials.description= +datasources.section.source-google-ads.credentials.properties.access_token.description=Access Token for making authenticated requests. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.client_id.description=The Client ID of your Google Ads developer application. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.client_secret.description=The Client Secret of your Google Ads developer application. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.developer_token.description=Developer token granted by Google to use their APIs. More instruction on how to find this value in our docs +datasources.section.source-google-ads.credentials.properties.refresh_token.description=The token for obtaining a new access token. More instruction on how to find this value in our docs +datasources.section.source-google-ads.custom_queries.description= +datasources.section.source-google-ads.custom_queries.items.properties.query.description=A custom defined GAQL query for building the report. Should not contain segments.date expression because it is used by incremental streams. See Google's query builder for more information. +datasources.section.source-google-ads.custom_queries.items.properties.table_name.description=The table name in your destination database for the chosen query. +datasources.section.source-google-ads.customer_id.description=Comma separated list of (client) customer IDs. Each customer ID must be specified as a 10-digit number without dashes. More instruction on how to find this value in our docs.
Metrics streams like AdGroupAdReport cannot be requested for a manager account. +datasources.section.source-google-ads.end_date.description=UTC date and time in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-google-ads.login_customer_id.description=If your access to the customer account is through a manager account, this field is required and must be set to the customer ID of the manager account (10-digit number without dashes). More information about this field can be found here +datasources.section.source-google-ads.start_date.description=UTC date and time in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.title=Authenticate via Google (OAuth) +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-analytics-data-api.credentials.title=Credentials +datasources.section.source-google-analytics-data-api.custom_reports.title=Custom Reports (Optional) +datasources.section.source-google-analytics-data-api.date_ranges_start_date.title=Date Range Start Date +datasources.section.source-google-analytics-data-api.property_id.title=Property ID +datasources.section.source-google-analytics-data-api.window_in_days.title=Data request time increment in days (Optional) +datasources.section.source-google-analytics-data-api.credentials.description=Credentials for the service +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application. +datasources.section.source-google-analytics-data-api.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-google-analytics-data-api.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization +datasources.section.source-google-analytics-data-api.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field. +datasources.section.source-google-analytics-data-api.date_ranges_start_date.description=The start date. One of the values Ndaysago, yesterday, today or in the format YYYY-MM-DD +datasources.section.source-google-analytics-data-api.property_id.description=A Google Analytics GA4 property identifier whose events are tracked.
Specified in the URL path and not the body +datasources.section.source-google-analytics-data-api.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.title=Access Token (Optional) +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-analytics-v4.credentials.oneOf.0.title=Authenticate via Google (OAuth) +datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.title=Service Account JSON Key +datasources.section.source-google-analytics-v4.credentials.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-analytics-v4.credentials.title=Credentials +datasources.section.source-google-analytics-v4.custom_reports.title=Custom Reports (Optional) +datasources.section.source-google-analytics-v4.start_date.title=Replication Start Date +datasources.section.source-google-analytics-v4.view_id.title=View ID +datasources.section.source-google-analytics-v4.window_in_days.title=Data request time increment in days (Optional) +datasources.section.source-google-analytics-v4.credentials.description=Credentials for the service +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_id.description=The Client ID of your Google Analytics developer application. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Google Analytics developer application. +datasources.section.source-google-analytics-v4.credentials.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-google-analytics-v4.credentials.oneOf.1.properties.credentials_json.description=The JSON key of the service account to use for authorization +datasources.section.source-google-analytics-v4.custom_reports.description=A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field. +datasources.section.source-google-analytics-v4.start_date.description=The date in the format YYYY-MM-DD. Any data before this date will not be replicated. +datasources.section.source-google-analytics-v4.view_id.description=The ID for the Google Analytics View you want to fetch data from. This can be found from the Google Analytics Account Explorer. +datasources.section.source-google-analytics-v4.window_in_days.description=The time increment used by the connector when requesting data from the Google Analytics API.
More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364. +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.title=Credentials Title +datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-directory.credentials.oneOf.0.title=Sign in via Google (OAuth) +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.title=Credentials JSON +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.title=Credentials Title +datasources.section.source-google-directory.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-google-directory.credentials.oneOf.1.title=Service Account Key +datasources.section.source-google-directory.credentials.title=Google Credentials +datasources.section.source-google-directory.credentials.description=Google APIs use the OAuth 2.0 protocol for authentication and authorization. The Source supports Web server application and Service accounts scenarios. +datasources.section.source-google-directory.credentials.oneOf.0.description=For this scenario, the user only needs to give permission to read Google Directory data. +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_id.description=The Client ID of the developer application. +datasources.section.source-google-directory.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the developer application. +datasources.section.source-google-directory.credentials.oneOf.0.properties.credentials_title.description=Authentication Scenario +datasources.section.source-google-directory.credentials.oneOf.0.properties.refresh_token.description=The Token for obtaining a new access token. +datasources.section.source-google-directory.credentials.oneOf.1.description=For this scenario, the user should obtain the service account's credentials from the Google API Console and provide a delegated email. +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key. +datasources.section.source-google-directory.credentials.oneOf.1.properties.credentials_title.description=Authentication Scenario +datasources.section.source-google-directory.credentials.oneOf.1.properties.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs.
+datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-google-search-console.authorization.oneOf.0.title=OAuth +datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.title=Admin Email +datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.title=Service Account JSON Key +datasources.section.source-google-search-console.authorization.oneOf.1.title=Service Account Key Authentication +datasources.section.source-google-search-console.authorization.title=Authentication Type +datasources.section.source-google-search-console.end_date.title=End Date +datasources.section.source-google-search-console.site_urls.title=Website URL Property +datasources.section.source-google-search-console.start_date.title=Start Date +datasources.section.source-google-search-console.authorization.description= +datasources.section.source-google-search-console.authorization.oneOf.0.properties.access_token.description=Access token for making authenticated requests. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_id.description=The client ID of your Google Search Console developer application. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.client_secret.description=The client secret of your Google Search Console developer application. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining a new access token. Read more here. +datasources.section.source-google-search-console.authorization.oneOf.1.properties.email.description=The email of the user who has permissions to access the Google Workspace Admin APIs. +datasources.section.source-google-search-console.authorization.oneOf.1.properties.service_account_info.description=The JSON key of the service account to use for authorization. Read more here. +datasources.section.source-google-search-console.end_date.description=UTC date in the format 2017-01-25. Any data after this date will not be replicated. Must be greater than or equal to the start date field. +datasources.section.source-google-search-console.site_urls.description=The URLs of the website property attached to your GSC account. Read more here. +datasources.section.source-google-search-console.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-google-workspace-admin-reports.credentials_json.title=Credentials JSON +datasources.section.source-google-workspace-admin-reports.email.title=Email +datasources.section.source-google-workspace-admin-reports.lookback.title=Lookback Window in Days +datasources.section.source-google-workspace-admin-reports.credentials_json.description=The contents of the JSON service account key. See the docs for more information on how to generate this key.
+datasources.section.source-google-workspace-admin-reports.email.description=The email of the user, who has permissions to access the Google Workspace Admin APIs. +datasources.section.source-google-workspace-admin-reports.lookback.description=Sets the range of time shown in the report. The maximum value allowed by the Google API is 180 days. +datasources.section.source-greenhouse.api_key.title=API Key +datasources.section.source-greenhouse.api_key.description=Greenhouse API Key. See the docs for more information on how to generate this key. +datasources.section.source-harvest.account_id.title=Account ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-harvest.credentials.oneOf.0.title=Authenticate via Harvest (OAuth) +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.title=Personal Access Token +datasources.section.source-harvest.credentials.oneOf.1.title=Authenticate with Personal Access Token +datasources.section.source-harvest.credentials.title=Authentication mechanism +datasources.section.source-harvest.replication_start_date.title=Start Date +datasources.section.source-harvest.account_id.description=Harvest account ID. Required for all Harvest requests in pair with Personal Access Token +datasources.section.source-harvest.credentials.description=Choose how to authenticate to Harvest. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_id.description=The Client ID of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Harvest developer application. +datasources.section.source-harvest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-harvest.credentials.oneOf.1.properties.api_token.description=Log into Harvest and then create new personal access token. +datasources.section.source-harvest.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-hellobaton.api_key.description=authentication key required to access the api endpoints +datasources.section.source-hellobaton.company.description=Company name that generates your base api url +datasources.section.source-hubplanner.api_key.description=Hubplanner API key. See https://github.com/hubplanner/API#authentication for more details. +datasources.section.source-instagram.access_token.title=Access Token +datasources.section.source-instagram.start_date.title=Start Date +datasources.section.source-instagram.access_token.description=The value of the access token generated. See the docs for more information +datasources.section.source-instagram.start_date.description=The date from which you'd like to replicate data for User Insights, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-intercom.access_token.title=Access token +datasources.section.source-intercom.start_date.title=Start date +datasources.section.source-intercom.access_token.description=Access token for making authenticated requests. See the Intercom docs for more information. 
+datasources.section.source-intercom.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-iterable.api_key.title=API Key +datasources.section.source-iterable.start_date.title=Start Date +datasources.section.source-iterable.api_key.description=Iterable API Key. See the docs for more information on how to obtain this key. +datasources.section.source-iterable.start_date.description=The date from which you'd like to replicate data for Iterable, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-jdbc.jdbc_url.title=JDBC URL +datasources.section.source-jdbc.password.title=Password +datasources.section.source-jdbc.username.title=Username +datasources.section.source-jdbc.jdbc_url.description=JDBC formatted URL. See the standard here. +datasources.section.source-jdbc.password.description=The password associated with this username. +datasources.section.source-jdbc.username.description=The username which is used to access the database. +datasources.section.source-jira.additional_fields.title=Additional Fields +datasources.section.source-jira.api_token.title=API Token +datasources.section.source-jira.domain.title=Domain +datasources.section.source-jira.email.title=Email +datasources.section.source-jira.enable_experimental_streams.title=Enable Experimental Streams +datasources.section.source-jira.expand_issue_changelog.title=Expand Issue Changelog +datasources.section.source-jira.projects.title=Projects +datasources.section.source-jira.render_fields.title=Render Issue Fields +datasources.section.source-jira.start_date.title=Start Date +datasources.section.source-jira.additional_fields.description=List of additional fields to include in replicating issues. +datasources.section.source-jira.api_token.description=Jira API Token. See the docs for more information on how to generate this key. +datasources.section.source-jira.domain.description=The Domain for your Jira account, e.g. airbyteio.atlassian.net +datasources.section.source-jira.email.description=The user email for your Jira account. +datasources.section.source-jira.enable_experimental_streams.description=Allow the use of experimental streams which rely on undocumented Jira API endpoints. See https://docs.airbyte.io/integrations/sources/jira#experimental-tables for more info. +datasources.section.source-jira.expand_issue_changelog.description=Expand the changelog when replicating issues. +datasources.section.source-jira.projects.description=List of Jira project keys to replicate data for. +datasources.section.source-jira.render_fields.description=Render issue fields in HTML format in addition to Jira JSON-like format. +datasources.section.source-jira.start_date.description=The date from which you'd like to replicate data for Jira in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only in the following incremental streams: issues. 
+datasources.section.source-kafka.auto_commit_interval_ms.title=Auto Commit Interval, ms +datasources.section.source-kafka.auto_offset_reset.title=Auto Offset Reset +datasources.section.source-kafka.bootstrap_servers.title=Bootstrap Servers +datasources.section.source-kafka.client_dns_lookup.title=Client DNS Lookup +datasources.section.source-kafka.client_id.title=Client ID +datasources.section.source-kafka.enable_auto_commit.title=Enable Auto Commit +datasources.section.source-kafka.group_id.title=Group ID +datasources.section.source-kafka.max_poll_records.title=Max Poll Records +datasources.section.source-kafka.polling_time.title=Polling Time +datasources.section.source-kafka.protocol.oneOf.0.title=PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.1.title=SASL PLAINTEXT +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.title=SASL JAAS Config +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.title=SASL Mechanism +datasources.section.source-kafka.protocol.oneOf.2.title=SASL SSL +datasources.section.source-kafka.protocol.title=Protocol +datasources.section.source-kafka.receive_buffer_bytes.title=Receive Buffer, bytes +datasources.section.source-kafka.repeated_calls.title=Repeated Calls +datasources.section.source-kafka.request_timeout_ms.title=Request Timeout, ms +datasources.section.source-kafka.retry_backoff_ms.title=Retry Backoff, ms +datasources.section.source-kafka.subscription.oneOf.0.properties.topic_partitions.title=List of topic:partition Pairs +datasources.section.source-kafka.subscription.oneOf.0.title=Manually assign a list of partitions +datasources.section.source-kafka.subscription.oneOf.1.properties.topic_pattern.title=Topic Pattern +datasources.section.source-kafka.subscription.oneOf.1.title=Subscribe to all topics matching specified pattern +datasources.section.source-kafka.subscription.title=Subscription Method +datasources.section.source-kafka.test_topic.title=Test Topic +datasources.section.source-kafka.auto_commit_interval_ms.description=The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true. +datasources.section.source-kafka.auto_offset_reset.description=What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer. +datasources.section.source-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down). 
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses. +datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging. +datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background. +datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups. +datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note, that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and returns them incrementally from each poll. +datasources.section.source-kafka.polling_time.description=Amount of time Kafka connector should try to poll for messages. +datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files. +datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. +datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used. +datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received. +datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. +datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios. 
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions. +datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one). +datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read. +datasources.section.source-kafka.test_topic.description=The Topic to test in case the Airbyte can consume messages. +datasources.section.source-klaviyo.api_key.title=Api Key +datasources.section.source-klaviyo.start_date.title=Start Date +datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key. +datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-kustomer-singer.api_token.title=API Token +datasources.section.source-kustomer-singer.start_date.title=Start Date +datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this +datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data +datasources.section.source-kyriba.domain.title=Domain +datasources.section.source-kyriba.end_date.title=End Date +datasources.section.source-kyriba.password.title=Password +datasources.section.source-kyriba.start_date.title=Start Date +datasources.section.source-kyriba.username.title=Username +datasources.section.source-kyriba.domain.description=Kyriba domain +datasources.section.source-kyriba.end_date.description=The date the sync should end. If let empty the sync will run to the current date. +datasources.section.source-kyriba.password.description=Password to be used in basic auth +datasources.section.source-kyriba.start_date.description=The date the sync should start from. +datasources.section.source-kyriba.username.description=Username to be used in basic auth +datasources.section.source-lemlist.api_key.title=API key +datasources.section.source-lemlist.api_key.description=Lemlist API key. +datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional) +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token +datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token +datasources.section.source-linkedin-ads.credentials.title=Authentication * +datasources.section.source-linkedin-ads.start_date.title=Start date +datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs separated by a space, to pull the data from. Leave empty, if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info. 
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application. +datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours. +datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token +datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token +datasources.section.source-linkedin-pages.credentials.title=Authentication * +datasources.section.source-linkedin-pages.org_id.title=Organization ID +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application. +datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours. +datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID +datasources.section.source-linnworks.application_id.title=Application ID. +datasources.section.source-linnworks.application_secret.title=Application Secret +datasources.section.source-linnworks.start_date.title=Start Date +datasources.section.source-linnworks.token.title=API Token +datasources.section.source-linnworks.application_id.description=Linnworks Application ID +datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret +datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-looker.client_id.title=Client ID +datasources.section.source-looker.client_secret.title=Client Secret +datasources.section.source-looker.domain.title=Domain +datasources.section.source-looker.run_look_ids.title=Look IDs to Run +datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key. +datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key. 
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com,looker.[clientname].com,IP address +datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional) +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key +datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key +datasources.section.source-mailchimp.credentials.title=Authentication * +datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key. +datasources.section.source-mailgun.domain_region.title=Domain Region Code +datasources.section.source-mailgun.private_key.title=Private API Key +datasources.section.source-mailgun.start_date.title=Replication Start Date +datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'. +datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data. +datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago. +datasources.section.source-marketo.client_id.title=Client ID +datasources.section.source-marketo.client_secret.title=Client Secret +datasources.section.source-marketo.domain_url.title=Domain URL +datasources.section.source-marketo.start_date.title=Start Date +datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this. +datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this. +datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID +datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0) +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID +datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft +datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism +datasources.section.source-microsoft-teams.period.title=Period +datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token. +datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application. +datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL +datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180. 
+datasources.section.source-mixpanel.api_secret.title=Project Secret +datasources.section.source-mixpanel.attribution_window.title=Attribution Window +datasources.section.source-mixpanel.date_window_size.title=Date slicing window +datasources.section.source-mixpanel.end_date.title=End Date +datasources.section.source-mixpanel.project_timezone.title=Project Timezone +datasources.section.source-mixpanel.region.title=Region +datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default +datasources.section.source-mixpanel.start_date.title=Start Date +datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this. +datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. Default attribution window is 5 days. +datasources.section.source-mixpanel.date_window_size.description=Defines the window size, in days, used to slice through the data. You can reduce it if the amount of data in each window is too big for your environment. +datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date. +datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console. +datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU. +datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored. +datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default. +datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional) +datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-monday.credentials.oneOf.1.title=API Token +datasources.section.source-monday.credentials.title=Authorization Method +datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. 
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com +datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests. +datasources.section.source-mongodb.auth_source.title=Authentication source +datasources.section.source-mongodb.database.title=Database name +datasources.section.source-mongodb.host.title=Host +datasources.section.source-mongodb.password.title=Password +datasources.section.source-mongodb.port.title=Port +datasources.section.source-mongodb.replica_set.title=Replica Set +datasources.section.source-mongodb.ssl.title=TLS connection +datasources.section.source-mongodb.user.title=User +datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info. +datasources.section.source-mongodb.database.description=Database to be replicated. +datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated. +datasources.section.source-mongodb.password.description=Password +datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated. +datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info. +datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB. +datasources.section.source-mongodb.user.description=User +datasources.section.source-mongodb-v2.auth_source.title=Authentication Source +datasources.section.source-mongodb-v2.database.title=Database Name +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection +datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses +datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL +datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas +datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type +datasources.section.source-mongodb-v2.password.title=Password +datasources.section.source-mongodb-v2.user.title=User +datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored. +datasources.section.source-mongodb-v2.database.description=The database you want to replicate. +datasources.section.source-mongodb-v2.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database. 
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database. +datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set. +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by comma. +datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to. +datasources.section.source-mongodb-v2.password.description=The password associated with this username. +datasources.section.source-mongodb-v2.user.description=The username which is used to access the database. +datasources.section.source-mssql.database.title=Database +datasources.section.source-mssql.host.title=Host +datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mssql.password.title=Password +datasources.section.source-mssql.port.title=Port +datasources.section.source-mssql.replication_method.oneOf.0.title=Standard +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level +datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mssql.replication_method.title=Replication Method +datasources.section.source-mssql.schemas.title=Schemas +datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted +datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate) +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate +datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate) +datasources.section.source-mssql.ssl_method.title=SSL Method +datasources.section.source-mssql.username.title=Username +datasources.section.source-mssql.database.description=The name of the database. +datasources.section.source-mssql.host.description=The hostname of the database. +datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mssql.password.description=The password associated with the username. +datasources.section.source-mssql.port.description=The port of the database. +datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself. +datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC. +datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database. +datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database. +datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!) +datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate. +datasources.section.source-mssql.username.description=The username which is used to access the database. 
+datasources.section.source-my-hours.email.title=Email +datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size +datasources.section.source-my-hours.password.title=Password +datasources.section.source-my-hours.start_date.title=Start Date +datasources.section.source-my-hours.email.description=Your My Hours username +datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days +datasources.section.source-my-hours.password.description=The password associated to the username +datasources.section.source-my-hours.start_date.description=Start date for collecting time logs +datasources.section.source-mysql.database.title=Database +datasources.section.source-mysql.host.title=Host +datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params +datasources.section.source-mysql.password.title=Password +datasources.section.source-mysql.port.title=Port +datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD +datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-mysql.replication_method.title=Replication Method +datasources.section.source-mysql.ssl.title=SSL Connection +datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred +datasources.section.source-mysql.ssl_mode.oneOf.1.title=required +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity +datasources.section.source-mysql.ssl_mode.title=SSL modes +datasources.section.source-mysql.username.title=Username +datasources.section.source-mysql.database.description=The database name. +datasources.section.source-mysql.host.description=The host name of the database. +datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-mysql.password.description=The password associated with the username. +datasources.section.source-mysql.port.description=The port to connect to. +datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database. +datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. +datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself. 
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL. +datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
  • preferred - Automatically attempt SSL connection. If the MySQL server does not support SSL, continue with a regular connection.
  • required - Always connect with SSL. If the MySQL server doesn’t support SSL, the connection will not be established. Certificate Authority (CA) and Hostname are not verified.
  • verify-ca - Always connect with SSL. Verifies CA, but allows connection even if Hostname does not match.
  • verify-identity - Always connect with SSL. Verify both CA and Hostname.
  • Read more in the docs. +datasources.section.source-mysql.ssl_mode.oneOf.0.description=Preferred SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.1.description=Require SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.description=Verify CA SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.source-mysql.ssl_mode.oneOf.3.description=Verify-full SSL mode. +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.description=CA certificate +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.description=Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.description=Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well) +datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically. +datasources.section.source-mysql.username.description=The username which is used to access the database. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-notion.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-notion.credentials.oneOf.1.properties.token.title=Access Token +datasources.section.source-notion.credentials.oneOf.1.title=Access Token +datasources.section.source-notion.credentials.title=Authenticate using +datasources.section.source-notion.start_date.title=Start Date +datasources.section.source-notion.credentials.description=Pick an authentication method. +datasources.section.source-notion.credentials.oneOf.0.properties.access_token.description=The Access Token is a token you receive by completing the OAuth web flow of Notion. +datasources.section.source-notion.credentials.oneOf.0.properties.client_id.description=The ClientID of your Notion integration. +datasources.section.source-notion.credentials.oneOf.0.properties.client_secret.description=The ClientSecret of your Notion integration. +datasources.section.source-notion.credentials.oneOf.1.properties.token.description=Notion API access token, see the docs for more information on how to obtain this token. +datasources.section.source-notion.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
+datasources.section.source-okta.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-okta.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.title=Personal API Token +datasources.section.source-okta.credentials.oneOf.1.title=API Token +datasources.section.source-okta.credentials.title=Authorization Method * +datasources.section.source-okta.domain.title=Okta domain +datasources.section.source-okta.start_date.title=Start Date +datasources.section.source-okta.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-okta.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain new Access Token, when it's expired. +datasources.section.source-okta.credentials.oneOf.1.properties.api_token.description=An Okta token. See the docs for instructions on how to generate it. +datasources.section.source-okta.domain.description=The Okta domain. See the docs for instructions on how to find it. +datasources.section.source-okta.start_date.description=UTC date and time in the format YYYY-MM-DDTHH:MM:SSZ. Any data before this date will not be replicated. +datasources.section.source-onesignal.outcome_names.title=Outcome Names +datasources.section.source-onesignal.start_date.title=Start Date +datasources.section.source-onesignal.user_auth_key.title=User Auth Key +datasources.section.source-onesignal.outcome_names.description=Comma-separated list of names and the value (sum/count) for the returned outcome data. See the docs for more details +datasources.section.source-onesignal.start_date.description=The date from which you'd like to replicate data for OneSignal API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-onesignal.user_auth_key.description=OneSignal User Auth Key, see the docs for more information on how to obtain this key. +datasources.section.source-openweather.appid.title=App ID +datasources.section.source-openweather.lang.title=Language +datasources.section.source-openweather.lat.title=Latitude +datasources.section.source-openweather.lon.title=Longitude +datasources.section.source-openweather.units.title=Units +datasources.section.source-openweather.appid.description=Your OpenWeather API Key. See here. The key is case sensitive. +datasources.section.source-openweather.lang.description=You can use lang parameter to get the output in your language. The contents of the description field will be translated. See here for the list of supported languages. +datasources.section.source-openweather.lat.description=Latitude for which you want to get weather condition from. (min -90, max 90) +datasources.section.source-openweather.lon.description=Longitude for which you want to get weather condition from. (min -180, max 180) +datasources.section.source-openweather.units.description=Units of measurement. standard, metric and imperial units are available. If you do not use the units parameter, standard units will be applied by default. 
+datasources.section.source-oracle.connection_data.oneOf.0.properties.service_name.title=Service name +datasources.section.source-oracle.connection_data.oneOf.0.title=Service name +datasources.section.source-oracle.connection_data.oneOf.1.properties.sid.title=System ID (SID) +datasources.section.source-oracle.connection_data.oneOf.1.title=System ID (SID) +datasources.section.source-oracle.connection_data.title=Connect by +datasources.section.source-oracle.encryption.oneOf.0.title=Unencrypted +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm +datasources.section.source-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE) +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM File +datasources.section.source-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate) +datasources.section.source-oracle.encryption.title=Encryption +datasources.section.source-oracle.host.title=Host +datasources.section.source-oracle.jdbc_url_params.title=JDBC URL Params +datasources.section.source-oracle.password.title=Password +datasources.section.source-oracle.port.title=Port +datasources.section.source-oracle.schemas.title=Schemas +datasources.section.source-oracle.username.title=User +datasources.section.source-oracle.connection_data.description=The connection data that will be used for the DB connection +datasources.section.source-oracle.connection_data.oneOf.0.description=Use service name +datasources.section.source-oracle.connection_data.oneOf.1.description=Use SID (Oracle System Identifier) +datasources.section.source-oracle.encryption.description=The encryption method which is used when communicating with the database. +datasources.section.source-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted. +datasources.section.source-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports. +datasources.section.source-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines what encryption algorithm is used. +datasources.section.source-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server. +datasources.section.source-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations. +datasources.section.source-oracle.host.description=Hostname of the database. +datasources.section.source-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-oracle.password.description=The password associated with the username. +datasources.section.source-oracle.port.description=Port of the database. +datasources.section.source-oracle.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive. +datasources.section.source-oracle.username.description=The username which is used to access the database. 
+datasources.section.source-orb.api_key.title=Orb API Key +datasources.section.source-orb.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-orb.numeric_event_properties_keys.title=Event properties keys (numeric values) +datasources.section.source-orb.start_date.title=Start Date +datasources.section.source-orb.string_event_properties_keys.title=Event properties keys (string values) +datasources.section.source-orb.api_key.description=Orb API Key, issued from the Orb admin console. +datasources.section.source-orb.lookback_window_days.description=When set to N, the connector will always refresh resources created within the past N days. By default, updated objects that are not newly created are not incrementally synced. +datasources.section.source-orb.numeric_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-orb.start_date.description=UTC date and time in the format 2022-03-01T00:00:00Z. Any data with created_at before this date will not be synced. +datasources.section.source-orb.string_event_properties_keys.description=Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction. +datasources.section.source-outreach.client_id.title=Client ID +datasources.section.source-outreach.client_secret.title=Client Secret +datasources.section.source-outreach.redirect_uri.title=Redirect URI +datasources.section.source-outreach.refresh_token.title=Refresh Token +datasources.section.source-outreach.start_date.title=Start Date +datasources.section.source-outreach.client_id.description=The Client ID of your Outreach developer application. +datasources.section.source-outreach.client_secret.description=The Client Secret of your Outreach developer application. +datasources.section.source-outreach.redirect_uri.description=A Redirect URI is the location where the authorization server sends the user once the app has been successfully authorized and granted an authorization code or access token. +datasources.section.source-outreach.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-outreach.start_date.description=The date from which you'd like to replicate data for Outreach API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-pardot.client_id.description=The Consumer Key that can be found when viewing your app in Salesforce +datasources.section.source-pardot.client_secret.description=The Consumer Secret that can be found when viewing your app in Salesforce +datasources.section.source-pardot.is_sandbox.description=Whether or not the app is in a Salesforce sandbox. If you do not know what this is, assume it is false. +datasources.section.source-pardot.pardot_business_unit_id.description=Pardot Business ID, can be found at Setup > Pardot > Pardot Account Setup +datasources.section.source-pardot.refresh_token.description=Salesforce Refresh Token used for Airbyte to access your Salesforce account. If you don't know what this is, follow this guide to retrieve it. +datasources.section.source-pardot.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
Leave blank to skip this filter +datasources.section.source-paypal-transaction.client_id.title=Client ID +datasources.section.source-paypal-transaction.client_secret.title=Client secret +datasources.section.source-paypal-transaction.is_sandbox.title=Sandbox +datasources.section.source-paypal-transaction.refresh_token.title=Refresh token (Optional) +datasources.section.source-paypal-transaction.start_date.title=Start Date +datasources.section.source-paypal-transaction.client_id.description=The Client ID of your Paypal developer application. +datasources.section.source-paypal-transaction.client_secret.description=The Client Secret of your Paypal developer application. +datasources.section.source-paypal-transaction.is_sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-paypal-transaction.refresh_token.description=The key to refresh the expired access token. +datasources.section.source-paypal-transaction.start_date.description=Start Date for data extraction in ISO format. Date must be in range from 3 years till 12 hrs before present time. +datasources.section.source-paystack.lookback_window_days.title=Lookback Window (in days) +datasources.section.source-paystack.secret_key.title=Secret Key +datasources.section.source-paystack.start_date.title=Start Date +datasources.section.source-paystack.lookback_window_days.description=When set, the connector will always reload data from the past N days, where N is the value set here. This is useful if your data is updated after creation. +datasources.section.source-paystack.secret_key.description=The Paystack API key (usually starts with 'sk_live_'; find yours here). +datasources.section.source-paystack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-persistiq.api_key.description=PersistIq API Key. See the docs for more information on where to find that key. +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pinterest.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-pinterest.credentials.oneOf.1.title=Access Token +datasources.section.source-pinterest.credentials.title=Authorization Method +datasources.section.source-pinterest.start_date.title=Start Date +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application +datasources.section.source-pinterest.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-pinterest.credentials.oneOf.0.properties.refresh_token.description=Refresh Token to obtain new Access Token, when it's expired. +datasources.section.source-pinterest.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests. +datasources.section.source-pinterest.start_date.description=A date in the format YYYY-MM-DD. If you have not set a date, it would be defaulted to 2020-07-28. 
+datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-pipedrive.authorization.oneOf.0.title=Sign in via Pipedrive (OAuth) +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.title=API Token +datasources.section.source-pipedrive.authorization.oneOf.1.title=API Key Authentication +datasources.section.source-pipedrive.authorization.title=Authentication Type +datasources.section.source-pipedrive.replication_start_date.title=Start Date +datasources.section.source-pipedrive.authorization.description=Choose one of the possible authorization methods +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_id.description=The Client ID of your Pipedrive developer application. +datasources.section.source-pipedrive.authorization.oneOf.0.properties.client_secret.description=The Client Secret of your Pipedrive developer application +datasources.section.source-pipedrive.authorization.oneOf.0.properties.refresh_token.description=The token for obtaining the new access token. +datasources.section.source-pipedrive.authorization.oneOf.1.properties.api_token.description=The Pipedrive API Token. +datasources.section.source-pipedrive.replication_start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. When specified and not None, the stream will behave as incremental +datasources.section.source-pivotal-tracker.api_token.description=Pivotal Tracker API token +datasources.section.source-plaid.access_token.title=Access Token +datasources.section.source-plaid.api_key.title=API Key +datasources.section.source-plaid.client_id.title=Client ID +datasources.section.source-plaid.plaid_env.title=Plaid Environment +datasources.section.source-plaid.start_date.title=Start Date +datasources.section.source-plaid.access_token.description=The end-user's Link access token. +datasources.section.source-plaid.api_key.description=The Plaid API key to use to hit the API. +datasources.section.source-plaid.client_id.description=The Plaid client id +datasources.section.source-plaid.plaid_env.description=The Plaid environment +datasources.section.source-plaid.start_date.description=The date from which you'd like to replicate data for Plaid in the format YYYY-MM-DD. All data generated after this date will be replicated. +datasources.section.source-pokeapi.pokemon_name.title=Pokemon Name +datasources.section.source-pokeapi.pokemon_name.description=Pokemon requested from the API. 
+datasources.section.source-postgres.database.title=Database Name +datasources.section.source-postgres.host.title=Host +datasources.section.source-postgres.jdbc_url_params.title=JDBC URL Parameters (Advanced) +datasources.section.source-postgres.password.title=Password +datasources.section.source-postgres.port.title=Port +datasources.section.source-postgres.replication_method.oneOf.0.title=Standard +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.title=Initial Waiting Time in Seconds (Advanced) +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.title=Plugin +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.title=Publication +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.title=Replication Slot +datasources.section.source-postgres.replication_method.oneOf.1.title=Logical Replication (CDC) +datasources.section.source-postgres.replication_method.title=Replication Method +datasources.section.source-postgres.schemas.title=Schemas +datasources.section.source-postgres.ssl.title=Connect using SSL +datasources.section.source-postgres.ssl_mode.oneOf.0.title=disable +datasources.section.source-postgres.ssl_mode.oneOf.1.title=allow +datasources.section.source-postgres.ssl_mode.oneOf.2.title=prefer +datasources.section.source-postgres.ssl_mode.oneOf.3.title=require +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.4.title=verify-ca +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA Certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client Certificate (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client Key (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional) +datasources.section.source-postgres.ssl_mode.oneOf.5.title=verify-full +datasources.section.source-postgres.ssl_mode.title=SSL Modes +datasources.section.source-postgres.username.title=Username +datasources.section.source-postgres.database.description=Name of the database. +datasources.section.source-postgres.host.description=Hostname of the database. +datasources.section.source-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters. +datasources.section.source-postgres.password.description=Password associated with the username. +datasources.section.source-postgres.port.description=Port of the database. +datasources.section.source-postgres.replication_method.description=Replication method for extracting data from the database. +datasources.section.source-postgres.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally. 
+datasources.section.source-postgres.replication_method.oneOf.1.description=Logical replication uses the Postgres write-ahead log (WAL) to detect inserts, updates, and deletes. This needs to be configured on the source database itself. Only available on Postgres 10 and above. Read the docs. +datasources.section.source-postgres.replication_method.oneOf.1.properties.initial_waiting_seconds.description=The amount of time the connector will wait when it launches to determine if there is new data to sync or not. Defaults to 300 seconds. Valid range: 120 seconds to 1200 seconds. Read about initial waiting time. +datasources.section.source-postgres.replication_method.oneOf.1.properties.plugin.description=A logical decoding plugin installed on the PostgreSQL server. The `pgoutput` plugin is used by default. If the replication table contains a lot of big jsonb values it is recommended to use `wal2json` plugin. Read more about selecting replication plugins. +datasources.section.source-postgres.replication_method.oneOf.1.properties.publication.description=A Postgres publication used for consuming changes. Read about publications and replication identities. +datasources.section.source-postgres.replication_method.oneOf.1.properties.replication_slot.description=A plugin logical replication slot. Read about replication slots. +datasources.section.source-postgres.schemas.description=The list of schemas (case sensitive) to sync from. Defaults to public. +datasources.section.source-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes. +datasources.section.source-postgres.ssl_mode.description=SSL connection modes. +datasources.section.source-postgres.ssl_mode.oneOf.0.description=Disable SSL. +datasources.section.source-postgres.ssl_mode.oneOf.1.description=Allow SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.3.description=Require SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. If you do not add it - the password will be generated automatically. +datasources.section.source-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode. +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key +datasources.section.source-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. If you do not add it - the password will be generated automatically. +datasources.section.source-postgres.username.description=Username to access the database. +datasources.section.source-posthog.api_key.title=API Key +datasources.section.source-posthog.base_url.title=Base URL +datasources.section.source-posthog.start_date.title=Start Date +datasources.section.source-posthog.api_key.description=API Key. 
See the docs for information on how to generate this key. +datasources.section.source-posthog.base_url.description=Base PostHog url. Defaults to PostHog Cloud (https://app.posthog.com). +datasources.section.source-posthog.start_date.description=The date from which you'd like to replicate the data. Any data before this date will not be replicated. +datasources.section.source-prestashop.access_key.description=Your PrestaShop access key. See the docs for info on how to obtain this. +datasources.section.source-prestashop.url.description=Shop URL without trailing slash (domain name or IP address) +datasources.section.source-qualaroo.key.title=API key +datasources.section.source-qualaroo.start_date.title=Start Date +datasources.section.source-qualaroo.survey_ids.title=Qualaroo survey IDs +datasources.section.source-qualaroo.token.title=API token +datasources.section.source-qualaroo.key.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-qualaroo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-qualaroo.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated. +datasources.section.source-qualaroo.token.description=A Qualaroo token. See the docs for instructions on how to generate it. +datasources.section.source-quickbooks-singer.client_id.title=Client ID +datasources.section.source-quickbooks-singer.client_secret.title=Client Secret +datasources.section.source-quickbooks-singer.realm_id.title=Realm ID +datasources.section.source-quickbooks-singer.refresh_token.title=Refresh Token +datasources.section.source-quickbooks-singer.sandbox.title=Sandbox +datasources.section.source-quickbooks-singer.start_date.title=Start Date +datasources.section.source-quickbooks-singer.user_agent.title=User Agent +datasources.section.source-quickbooks-singer.client_id.description=Identifies which app is making the request. Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.client_secret.description= Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production. +datasources.section.source-quickbooks-singer.realm_id.description=Labeled Company ID. The Make API Calls panel is populated with the realm id and the current access token. +datasources.section.source-quickbooks-singer.refresh_token.description=A token used when refreshing the access token. +datasources.section.source-quickbooks-singer.sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-quickbooks-singer.start_date.description=The default value to use if no bookmark exists for an endpoint (rfc3339 date string). E.g, 2021-03-20T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-quickbooks-singer.user_agent.description=Process and email for API logging purposes. Example: tap-quickbooks . +datasources.section.source-recharge.access_token.title=Access Token +datasources.section.source-recharge.start_date.title=Start Date +datasources.section.source-recharge.access_token.description=The value of the Access Token generated. See the docs for more information. 
+datasources.section.source-recharge.start_date.description=The date from which you'd like to replicate data for Recharge API, in the format YYYY-MM-DDT00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-recurly.api_key.title=API Key +datasources.section.source-recurly.api_key.description=Recurly API Key. See the docs for more information on how to generate this key. +datasources.section.source-recurly.begin_time.description=ISO8601 timestamp from which the replication from Recurly API will start from. +datasources.section.source-recurly.end_time.description=ISO8601 timestamp to which the replication from Recurly API will stop. Records after that date won't be imported. +datasources.section.source-redshift.database.title=Database +datasources.section.source-redshift.host.title=Host +datasources.section.source-redshift.jdbc_url_params.title=JDBC URL Params +datasources.section.source-redshift.password.title=Password +datasources.section.source-redshift.port.title=Port +datasources.section.source-redshift.schemas.title=Schemas +datasources.section.source-redshift.username.title=Username +datasources.section.source-redshift.database.description=Name of the database. +datasources.section.source-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com). +datasources.section.source-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-redshift.password.description=Password associated with the username. +datasources.section.source-redshift.port.description=Port of the database. +datasources.section.source-redshift.schemas.description=The list of schemas to sync from. Specify one or more explicitly or keep empty to process all schemas. Schema names are case sensitive. +datasources.section.source-redshift.username.description=Username to use to access the database. +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-retently.credentials.oneOf.0.title=Authenticate via Retently (OAuth) +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.title=API Token +datasources.section.source-retently.credentials.oneOf.1.title=Authenticate with API Token +datasources.section.source-retently.credentials.title=Authentication Mechanism +datasources.section.source-retently.credentials.description=Choose how to authenticate to Retently +datasources.section.source-retently.credentials.oneOf.0.properties.client_id.description=The Client ID of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Retently developer application. +datasources.section.source-retently.credentials.oneOf.0.properties.refresh_token.description=Retently Refresh Token which can be used to fetch new Bearer Tokens when the current one expires. +datasources.section.source-retently.credentials.oneOf.1.properties.api_key.description=Retently API Token. See the docs for more information on how to obtain this key. 
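The Retently entries above describe a `oneOf` credentials block: a configuration supplies either the OAuth fields or the API token, never both. A minimal, hypothetical sketch of the two shapes follows (all values are placeholders; any discriminator field such as an `auth_type` constant is omitted here and may be required by the actual spec):

```
{ "credentials": { "client_id": "<client-id>", "client_secret": "<client-secret>", "refresh_token": "<refresh-token>" } }
```

```
{ "credentials": { "api_key": "<api-token>" } }
```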
+datasources.section.source-rki-covid.start_date.title=Start Date +datasources.section.source-rki-covid.start_date.description=UTC date in the format 2017-01-25. Any data before this date will not be replicated. +datasources.section.source-s3.dataset.title=Output Stream Name +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.title=Additional Reader Options +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.title=Advanced Options +datasources.section.source-s3.format.oneOf.0.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.0.properties.delimiter.title=Delimiter +datasources.section.source-s3.format.oneOf.0.properties.double_quote.title=Double Quote +datasources.section.source-s3.format.oneOf.0.properties.encoding.title=Encoding +datasources.section.source-s3.format.oneOf.0.properties.escape_char.title=Escape Character +datasources.section.source-s3.format.oneOf.0.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.title=Infer Datatypes +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.0.properties.quote_char.title=Quote Character +datasources.section.source-s3.format.oneOf.0.title=CSV +datasources.section.source-s3.format.oneOf.1.properties.batch_size.title=Record batch size +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.title=Buffer Size +datasources.section.source-s3.format.oneOf.1.properties.columns.title=Selected Columns +datasources.section.source-s3.format.oneOf.1.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.1.title=Parquet +datasources.section.source-s3.format.oneOf.2.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.2.title=Avro +datasources.section.source-s3.format.oneOf.3.properties.block_size.title=Block Size +datasources.section.source-s3.format.oneOf.3.properties.filetype.title=Filetype +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.title=Allow newlines in values +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.title=UnexpectedFieldBehaviorEnum +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.title=Unexpected field behavior +datasources.section.source-s3.format.oneOf.3.title=Jsonl +datasources.section.source-s3.format.title=File Format +datasources.section.source-s3.path_pattern.title=Pattern of files to replicate +datasources.section.source-s3.provider.properties.aws_access_key_id.title=AWS Access Key ID +datasources.section.source-s3.provider.properties.aws_secret_access_key.title=AWS Secret Access Key +datasources.section.source-s3.provider.properties.bucket.title=Bucket +datasources.section.source-s3.provider.properties.endpoint.title=Endpoint +datasources.section.source-s3.provider.properties.path_prefix.title=Path Prefix +datasources.section.source-s3.provider.properties.use_ssl.title=Use TLS +datasources.section.source-s3.provider.properties.verify_ssl_cert.title=Verify TLS Certificates +datasources.section.source-s3.provider.title=S3: Amazon Web Services +datasources.section.source-s3.schema.title=Manually enforced data schema (Optional) +datasources.section.source-s3.dataset.description=The name of the stream you would like this source to output. Can contain letters, numbers, or underscores. 
+datasources.section.source-s3.format.description=The format of the files you'd like to replicate +datasources.section.source-s3.format.oneOf.0.description=This connector utilises PyArrow (Apache Arrow) for CSV parsing. +datasources.section.source-s3.format.oneOf.0.properties.additional_reader_options.description=Optionally add a valid JSON string here to provide additional options to the csv reader. Mappings must correspond to options detailed here. 'column_types' is used internally to handle schema so overriding that would likely cause problems. +datasources.section.source-s3.format.oneOf.0.properties.advanced_options.description=Optionally add a valid JSON string here to provide additional Pyarrow ReadOptions. Specify 'column_names' here if your CSV doesn't have a header, or if you want to use custom column names. 'block_size' and 'encoding' are already used above; specifying them again here will override the values above. +datasources.section.source-s3.format.oneOf.0.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.0.properties.delimiter.description=The character delimiting individual cells in the CSV data. This may only be a 1-character string. For tab-delimited data enter '\t'. +datasources.section.source-s3.format.oneOf.0.properties.double_quote.description=Whether two quotes in a quoted CSV value denote a single quote in the data. +datasources.section.source-s3.format.oneOf.0.properties.encoding.description=The character encoding of the CSV data. Leave blank to default to UTF8. See list of python encodings for allowable options. +datasources.section.source-s3.format.oneOf.0.properties.escape_char.description=The character used for escaping special characters. To disallow escaping, leave this field blank. +datasources.section.source-s3.format.oneOf.0.properties.infer_datatypes.description=Configures whether a schema for the source should be inferred from the current data or not. If set to false and a custom schema is set, then the manually enforced schema is used. If a schema is not manually set, and this is set to false, then all fields will be read as strings. +datasources.section.source-s3.format.oneOf.0.properties.newlines_in_values.description=Whether newline characters are allowed in CSV values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.0.properties.quote_char.description=The character used for quoting CSV values. To disallow quoting, leave this field blank. +datasources.section.source-s3.format.oneOf.1.description=This connector utilises PyArrow (Apache Arrow) for Parquet parsing. +datasources.section.source-s3.format.oneOf.1.properties.batch_size.description=Maximum number of records per batch read from the input files. Batches may be smaller if there aren’t enough rows in the file. This option can help avoid out-of-memory errors if your data is particularly wide. +datasources.section.source-s3.format.oneOf.1.properties.buffer_size.description=Perform read buffering when deserializing individual column chunks. By default every group column will be loaded fully to memory. This option can help avoid out-of-memory errors if your data is particularly wide. 
+datasources.section.source-s3.format.oneOf.1.properties.columns.description=If you only want to sync a subset of the columns from the file(s), add the columns you want here as a comma-delimited list. Leave it empty to sync all columns. +datasources.section.source-s3.format.oneOf.2.description=This connector utilises fastavro for Avro parsing. +datasources.section.source-s3.format.oneOf.3.description=This connector uses PyArrow for JSON Lines (jsonl) file parsing. +datasources.section.source-s3.format.oneOf.3.properties.block_size.description=The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors. +datasources.section.source-s3.format.oneOf.3.properties.newlines_in_values.description=Whether newline characters are allowed in JSON values. Turning this on may affect performance. Leave blank to default to False. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.allOf.0.description=An enumeration. +datasources.section.source-s3.format.oneOf.3.properties.unexpected_field_behavior.description=How JSON fields outside of explicit_schema (if given) are treated. Check PyArrow documentation for details +datasources.section.source-s3.path_pattern.description=A regular expression which tells the connector which files to replicate. All files which match this pattern will be replicated. Use | to separate multiple patterns. See this page to understand pattern syntax (GLOBSTAR and SPLIT flags are enabled). Use pattern ** to pick up all files. +datasources.section.source-s3.provider.description=Use this to load files from S3 or S3-compatible services +datasources.section.source-s3.provider.properties.aws_access_key_id.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.aws_secret_access_key.description=In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary. +datasources.section.source-s3.provider.properties.bucket.description=Name of the S3 bucket where the file(s) exist. +datasources.section.source-s3.provider.properties.endpoint.description=Endpoint to an S3 compatible service. Leave empty to use AWS. +datasources.section.source-s3.provider.properties.path_prefix.description=By providing a path-like prefix (e.g. myFolder/thisTable/) under which all the relevant files sit, we can optimize finding these in S3. This is optional but recommended if your bucket contains many folders/files which you don't need to replicate. +datasources.section.source-s3.provider.properties.use_ssl.description=Whether the remote server is using a secure SSL/TLS connection. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.provider.properties.verify_ssl_cert.description=Set this to false to allow self signed certificates. Only relevant if using an S3-compatible, non-AWS server +datasources.section.source-s3.schema.description=Optionally provide a schema to enforce, as a valid JSON string. Ensure this is a mapping of { "column" : "type" }, where types are valid JSON Schema datatypes. Leave as {} to auto-infer the schema. 
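Taken together, the source-s3 fields above form a single connection configuration. A minimal hypothetical sketch is shown below; the bucket name, credentials, column names, and the exact `filetype` literal are placeholder assumptions, not values taken from the spec:

```
{
  "dataset": "orders",
  "path_pattern": "myFolder/thisTable/**/*.csv",
  "format": {
    "filetype": "csv",
    "delimiter": ","
  },
  "provider": {
    "bucket": "my-example-bucket",
    "aws_access_key_id": "<access-key-id>",
    "aws_secret_access_key": "<secret-access-key>",
    "path_prefix": "myFolder/thisTable/"
  },
  "schema": "{ \"id\": \"integer\", \"name\": \"string\" }"
}
```

As the descriptions above note, `schema` is itself a JSON string of `{ "column": "type" }` pairs; leaving it as `{}` auto-infers the schema.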
+datasources.section.source-salesloft.client_id.title=Client ID +datasources.section.source-salesloft.client_secret.title=Client Secret +datasources.section.source-salesloft.refresh_token.title=Refresh Token +datasources.section.source-salesloft.start_date.title=Start Date +datasources.section.source-salesloft.client_id.description=The Client ID of your Salesloft developer application. +datasources.section.source-salesloft.client_secret.description=The Client Secret of your Salesloft developer application. +datasources.section.source-salesloft.refresh_token.description=The token for obtaining a new access token. +datasources.section.source-salesloft.start_date.description=The date from which you'd like to replicate data for Salesloft API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-search-metrics.api_key.title=API Key +datasources.section.source-search-metrics.client_secret.title=Client Secret +datasources.section.source-search-metrics.country_code.title=Country Code +datasources.section.source-search-metrics.start_date.title=Start Date +datasources.section.source-search-metrics.api_key.description= +datasources.section.source-search-metrics.client_secret.description= +datasources.section.source-search-metrics.country_code.description=The region of the S3 staging bucket to use if utilising a copy strategy. +datasources.section.source-search-metrics.start_date.description=Data generated in SearchMetrics after this date will be replicated. This date must be specified in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-sendgrid.apikey.title=Sendgrid API key +datasources.section.source-sendgrid.start_time.title=Start time +datasources.section.source-sendgrid.apikey.description=API Key, use admin to generate this key. +datasources.section.source-sendgrid.start_time.description=Start time in timestamp integer format. Any data before this timestamp will not be replicated. +datasources.section.source-sentry.auth_token.title=Authentication Tokens +datasources.section.source-sentry.hostname.title=Host Name +datasources.section.source-sentry.organization.title=Organization +datasources.section.source-sentry.project.title=Project +datasources.section.source-sentry.auth_token.description=Log into Sentry and then create authentication tokens. For self-hosted, you can find or create authentication tokens by visiting "{instance_url_prefix}/settings/account/api/auth-tokens/" +datasources.section.source-sentry.hostname.description=Host name of the Sentry API server. For self-hosted, specify your host name here. Otherwise, leave it empty. +datasources.section.source-sentry.organization.description=The slug of the organization the groups belong to. +datasources.section.source-sentry.project.description=The name (slug) of the Project you want to sync. 
+datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.title=Password +datasources.section.source-sftp.credentials.oneOf.0.title=Password Authentication +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.title=SSH Private Key +datasources.section.source-sftp.credentials.oneOf.1.title=SSH Key Authentication +datasources.section.source-sftp.credentials.title=Authentication * +datasources.section.source-sftp.file_pattern.title=File Pattern (Optional) +datasources.section.source-sftp.file_types.title=File types +datasources.section.source-sftp.folder_path.title=Folder Path (Optional) +datasources.section.source-sftp.host.title=Host Address +datasources.section.source-sftp.port.title=Port +datasources.section.source-sftp.user.title=User Name +datasources.section.source-sftp.credentials.description=The server authentication method +datasources.section.source-sftp.credentials.oneOf.0.properties.auth_method.description=Connect through password authentication +datasources.section.source-sftp.credentials.oneOf.0.properties.auth_user_password.description=OS-level password for logging into the jump server host +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_method.description=Connect through SSH key +datasources.section.source-sftp.credentials.oneOf.1.properties.auth_ssh_key.description=OS-level user account SSH key credentials in RSA PEM format (created with ssh-keygen -t rsa -m PEM -f myuser_rsa) +datasources.section.source-sftp.file_pattern.description=The regular expression to specify files for sync in a chosen Folder Path +datasources.section.source-sftp.file_types.description=Comma-separated file types. Currently only 'csv' and 'json' types are supported. +datasources.section.source-sftp.folder_path.description=The directory to search files for sync +datasources.section.source-sftp.host.description=The server host address +datasources.section.source-sftp.port.description=The server port +datasources.section.source-sftp.user.description=The server user +datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-shopify.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.title=API Password +datasources.section.source-shopify.credentials.oneOf.1.title=API Password +datasources.section.source-shopify.credentials.title=Shopify Authorization Method +datasources.section.source-shopify.shop.title=Shopify Store +datasources.section.source-shopify.start_date.title=Replication Start Date +datasources.section.source-shopify.credentials.description=The authorization method to use to retrieve data from Shopify +datasources.section.source-shopify.credentials.oneOf.0.description=OAuth2.0 +datasources.section.source-shopify.credentials.oneOf.0.properties.access_token.description=The Access Token for making authenticated requests. +datasources.section.source-shopify.credentials.oneOf.0.properties.client_id.description=The Client ID of the Shopify developer application. +datasources.section.source-shopify.credentials.oneOf.0.properties.client_secret.description=The Client Secret of the Shopify developer application. 
+datasources.section.source-shopify.credentials.oneOf.1.description=API Password Auth +datasources.section.source-shopify.credentials.oneOf.1.properties.api_password.description=The API Password for your private application in the `Shopify` store. +datasources.section.source-shopify.shop.description=The name of your Shopify store found in the URL. For example, if your URL was https://NAME.myshopify.com, then the name would be 'NAME'. +datasources.section.source-shopify.start_date.description=The date you would like to replicate data from. Format: YYYY-MM-DD. Any data before this date will not be replicated. +datasources.section.source-shortio.domain_id.title=Domain ID +datasources.section.source-shortio.secret_key.title=Secret Key +datasources.section.source-shortio.start_date.title=Start Date +datasources.section.source-shortio.secret_key.description=Short.io Secret Key +datasources.section.source-shortio.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-slack.channel_filter.title=Channel name filter +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.title=Access token +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.title=Refresh token +datasources.section.source-slack.credentials.oneOf.0.title=Sign in via Slack (OAuth) +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-slack.credentials.oneOf.1.title=API Token +datasources.section.source-slack.credentials.title=Authentication mechanism +datasources.section.source-slack.join_channels.title=Join all channels +datasources.section.source-slack.lookback_window.title=Threads Lookback window (Days) +datasources.section.source-slack.start_date.title=Start Date +datasources.section.source-slack.channel_filter.description=A channel name list (without the leading '#' char) which limits the channels from which you'd like to sync. An empty list means no filter. +datasources.section.source-slack.credentials.description=Choose how to authenticate into Slack +datasources.section.source-slack.credentials.oneOf.0.properties.access_token.description=Slack access_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.0.properties.client_id.description=Slack client_id. See our docs if you need help finding this id. +datasources.section.source-slack.credentials.oneOf.0.properties.client_secret.description=Slack client_secret. See our docs if you need help finding this secret. +datasources.section.source-slack.credentials.oneOf.0.properties.refresh_token.description=Slack refresh_token. See our docs if you need help generating the token. +datasources.section.source-slack.credentials.oneOf.1.properties.api_token.description=A Slack bot token. See the docs for instructions on how to generate it. +datasources.section.source-slack.join_channels.description=Whether to join all channels or to sync data only from channels the bot is already in. If false, you'll need to manually add the bot to all the channels from which you'd like to sync messages. +datasources.section.source-slack.lookback_window.description=How far into the past to look for messages in threads. 
+datasources.section.source-slack.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-smartsheets.access_token.title=Access Token +datasources.section.source-smartsheets.spreadsheet_id.title=Sheet ID +datasources.section.source-smartsheets.start_datetime.title=Start Datetime (Optional) +datasources.section.source-smartsheets.access_token.description=The access token to use for accessing your data from Smartsheets. This access token must be generated by a user with at least read access to the data you'd like to replicate. Generate an access token in the Smartsheets main menu by clicking Account > Apps & Integrations > API Access. See the setup guide for information on how to obtain this token. +datasources.section.source-smartsheets.spreadsheet_id.description=The spreadsheet ID. Find it by opening the spreadsheet then navigating to File > Properties +datasources.section.source-smartsheets.start_datetime.description=Only rows modified after this date/time will be replicated. This should be an ISO 8601 string, for instance: `2000-01-01T13:00:00` +datasources.section.source-snapchat-marketing.client_id.title=Client ID +datasources.section.source-snapchat-marketing.client_secret.title=Client Secret +datasources.section.source-snapchat-marketing.end_date.title=End Date (Optional) +datasources.section.source-snapchat-marketing.refresh_token.title=Refresh Token +datasources.section.source-snapchat-marketing.start_date.title=Start Date +datasources.section.source-snapchat-marketing.client_id.description=The Client ID of your Snapchat developer application. +datasources.section.source-snapchat-marketing.client_secret.description=The Client Secret of your Snapchat developer application. +datasources.section.source-snapchat-marketing.end_date.description=Date in the format 2017-01-25. Any data after this date will not be replicated. +datasources.section.source-snapchat-marketing.refresh_token.description=Refresh Token to renew the expired Access Token. +datasources.section.source-snapchat-marketing.start_date.description=Date in the format 2022-01-01. Any data before this date will not be replicated. 
+datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-snowflake.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.title=Password +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.title=Username +datasources.section.source-snowflake.credentials.oneOf.1.title=Username and Password +datasources.section.source-snowflake.credentials.title=Authorization Method +datasources.section.source-snowflake.database.title=Database +datasources.section.source-snowflake.host.title=Account Name +datasources.section.source-snowflake.jdbc_url_params.title=JDBC URL Params +datasources.section.source-snowflake.role.title=Role +datasources.section.source-snowflake.schema.title=Schema +datasources.section.source-snowflake.warehouse.title=Warehouse +datasources.section.source-snowflake.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_id.description=The Client ID of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Snowflake developer application. +datasources.section.source-snowflake.credentials.oneOf.0.properties.refresh_token.description=Refresh Token for making authenticated requests. +datasources.section.source-snowflake.credentials.oneOf.1.properties.password.description=The password associated with the username. +datasources.section.source-snowflake.credentials.oneOf.1.properties.username.description=The username you created to allow Airbyte to access the database. +datasources.section.source-snowflake.database.description=The database you created for Airbyte to access data. +datasources.section.source-snowflake.host.description=The host domain of the snowflake instance (must include the account, region, cloud environment, and end with snowflakecomputing.com). +datasources.section.source-snowflake.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). +datasources.section.source-snowflake.role.description=The role you created for Airbyte to access Snowflake. +datasources.section.source-snowflake.schema.description=The source Snowflake schema tables. +datasources.section.source-snowflake.warehouse.description=The warehouse you created for Airbyte to access data. 
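As a worked example of the host and `jdbc_url_params` formats described above, a hypothetical username/password configuration for source-snowflake might look like the sketch below (all values are illustrative placeholders, not taken from the spec):

```
{
  "host": "accountname.us-east-2.aws.snowflakecomputing.com",
  "role": "AIRBYTE_ROLE",
  "warehouse": "AIRBYTE_WAREHOUSE",
  "database": "AIRBYTE_DATABASE",
  "schema": "PUBLIC",
  "jdbc_url_params": "key1=value1&key2=value2&key3=value3",
  "credentials": {
    "username": "AIRBYTE_USER",
    "password": "<password>"
  }
}
```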
+datasources.section.source-square.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-square.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-square.credentials.oneOf.0.title=Oauth authentication +datasources.section.source-square.credentials.oneOf.1.properties.api_key.title=API key token +datasources.section.source-square.credentials.oneOf.1.title=API Key +datasources.section.source-square.credentials.title=Credential Type +datasources.section.source-square.include_deleted_objects.title=Include Deleted Objects +datasources.section.source-square.is_sandbox.title=Sandbox +datasources.section.source-square.start_date.title=Start Date +datasources.section.source-square.credentials.oneOf.0.properties.client_id.description=The Square-issued ID of your application +datasources.section.source-square.credentials.oneOf.0.properties.client_secret.description=The Square-issued application secret for your application +datasources.section.source-square.credentials.oneOf.0.properties.refresh_token.description=A refresh token generated using the above client ID and secret +datasources.section.source-square.credentials.oneOf.1.properties.api_key.description=The API key for a Square application +datasources.section.source-square.include_deleted_objects.description=In some streams there is an option to include deleted objects (Items, Categories, Discounts, Taxes) +datasources.section.source-square.is_sandbox.description=Determines whether to use the sandbox or production environment. +datasources.section.source-square.start_date.description=UTC date in the format YYYY-MM-DD. Any data before this date will not be replicated. If not set, all data will be replicated. +datasources.section.source-strava.athlete_id.title=Athlete ID +datasources.section.source-strava.client_id.title=Client ID +datasources.section.source-strava.client_secret.title=Client Secret +datasources.section.source-strava.refresh_token.title=Refresh Token +datasources.section.source-strava.start_date.title=Start Date +datasources.section.source-strava.athlete_id.description=The Athlete ID of your Strava developer application. +datasources.section.source-strava.client_id.description=The Client ID of your Strava developer application. +datasources.section.source-strava.client_secret.description=The Client Secret of your Strava developer application. +datasources.section.source-strava.refresh_token.description=The Refresh Token with the activity: read_all permissions. +datasources.section.source-strava.start_date.description=UTC date and time. Any data before this date will not be replicated. +datasources.section.source-surveymonkey.access_token.title=Access Token +datasources.section.source-surveymonkey.start_date.title=Start Date +datasources.section.source-surveymonkey.survey_ids.title=Survey Monkey survey IDs +datasources.section.source-surveymonkey.access_token.description=Access Token for making authenticated requests. See the docs for information on how to generate this key. +datasources.section.source-surveymonkey.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-surveymonkey.survey_ids.description=IDs of the surveys from which you'd like to replicate data. If left empty, data from all boards to which you have access will be replicated. 
+datasources.section.source-talkdesk-explore.api_key.title=API KEY +datasources.section.source-talkdesk-explore.auth_url.title=AUTH URL +datasources.section.source-talkdesk-explore.start_date.title=START DATE +datasources.section.source-talkdesk-explore.timezone.title=TIMEZONE +datasources.section.source-talkdesk-explore.api_key.description=Talkdesk API key. +datasources.section.source-talkdesk-explore.auth_url.description=Talkdesk Auth URL. Only 'client_credentials' auth type supported at the moment. +datasources.section.source-talkdesk-explore.start_date.description=The date from which you'd like to replicate data for Talkdesk Explore API, in the format YYYY-MM-DDT00:00:00. All data generated after this date will be replicated. +datasources.section.source-talkdesk-explore.timezone.description=Timezone to use when generating reports. Only IANA timezones are supported (https://nodatime.org/TimeZones) +datasources.section.source-tempo.api_token.title=API token +datasources.section.source-tempo.api_token.description=Tempo API Token. Go to Tempo>Settings, scroll down to Data Access and select API integration. +datasources.section.source-tidb.database.title=Database +datasources.section.source-tidb.host.title=Host +datasources.section.source-tidb.jdbc_url_params.title=JDBC URL Params +datasources.section.source-tidb.password.title=Password +datasources.section.source-tidb.port.title=Port +datasources.section.source-tidb.ssl.title=SSL Connection +datasources.section.source-tidb.username.title=Username +datasources.section.source-tidb.database.description=Name of the database. +datasources.section.source-tidb.host.description=Hostname of the database. +datasources.section.source-tidb.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3) +datasources.section.source-tidb.password.description=Password associated with the username. +datasources.section.source-tidb.port.description=Port of the database. +datasources.section.source-tidb.ssl.description=Encrypt data using SSL. +datasources.section.source-tidb.username.description=Username to use to access the database. +datasources.section.source-timely.account_id.title=account_id +datasources.section.source-timely.bearer_token.title=Bearer token +datasources.section.source-timely.start_date.title=startDate +datasources.section.source-timely.account_id.description=Timely account id +datasources.section.source-timely.bearer_token.description=Timely bearer token +datasources.section.source-timely.start_date.description=start date +datasources.section.source-tplcentral.client_id.title=Client ID +datasources.section.source-tplcentral.client_secret.title=Client secret +datasources.section.source-tplcentral.customer_id.title=Customer ID +datasources.section.source-tplcentral.facility_id.title=Facility ID +datasources.section.source-tplcentral.start_date.title=Start date +datasources.section.source-tplcentral.tpl_key.title=3PL GUID +datasources.section.source-tplcentral.url_base.title=URL base +datasources.section.source-tplcentral.user_login.title=User login name +datasources.section.source-tplcentral.user_login_id.title=User login ID +datasources.section.source-tplcentral.start_date.description=Date and time together in RFC 3339 format, for example, 2018-11-13T20:20:39+00:00. 
+datasources.section.source-tplcentral.user_login.description=User login ID and/or name is required +datasources.section.source-tplcentral.user_login_id.description=User login ID and/or name is required +datasources.section.source-trello.board_ids.title=Trello Board IDs +datasources.section.source-trello.key.title=API key +datasources.section.source-trello.start_date.title=Start Date +datasources.section.source-trello.token.title=API token +datasources.section.source-trello.board_ids.description=IDs of the boards to replicate data from. If left empty, data from all boards to which you have access will be replicated. +datasources.section.source-trello.key.description=Trello API key. See the docs for instructions on how to generate it. +datasources.section.source-trello.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-trello.token.description=Trello v API token. See the docs for instructions on how to generate it. +datasources.section.source-twilio.account_sid.title=Account ID +datasources.section.source-twilio.auth_token.title=Auth Token +datasources.section.source-twilio.lookback_window.title=Lookback window +datasources.section.source-twilio.start_date.title=Replication Start Date +datasources.section.source-twilio.account_sid.description=Twilio account SID +datasources.section.source-twilio.auth_token.description=Twilio Auth Token. +datasources.section.source-twilio.lookback_window.description=How far into the past to look for records. (in minutes) +datasources.section.source-twilio.start_date.description=UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated. +datasources.section.source-typeform.form_ids.title=Form IDs to replicate +datasources.section.source-typeform.start_date.title=Start Date +datasources.section.source-typeform.token.title=API Token +datasources.section.source-typeform.form_ids.description=When this parameter is set, the connector will replicate data only from the input forms. Otherwise, all forms in your Typeform account will be replicated. You can find form IDs in your form URLs. For example, in the URL "https://mysite.typeform.com/to/u6nXL7" the form_id is u6nXL7. You can find form URLs on Share panel +datasources.section.source-typeform.start_date.description=UTC date and time in the format: YYYY-MM-DDTHH:mm:ss[Z]. Any data before this date will not be replicated. +datasources.section.source-typeform.token.description=The API Token for a Typeform account. +datasources.section.source-us-census.api_key.description=Your API Key. Get your key here. +datasources.section.source-us-census.query_params.description=The query parameters portion of the GET request, without the api key +datasources.section.source-us-census.query_path.description=The path portion of the GET request +datasources.section.source-woocommerce.conversion_window_days.title=Conversion Window (Optional) +datasources.section.source-woocommerce.api_key.description=The CUSTOMER KEY for API in WooCommerce shop. +datasources.section.source-woocommerce.api_secret.description=The CUSTOMER SECRET for API in WooCommerce shop. +datasources.section.source-woocommerce.conversion_window_days.description=A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. +datasources.section.source-woocommerce.shop.description=The name of the store. 
For https://EXAMPLE.com, the shop name is 'EXAMPLE.com'. +datasources.section.source-woocommerce.start_date.description=The date from which you would like to replicate data. Format: YYYY-MM-DD. +datasources.section.source-yahoo-finance-price.interval.title=Interval +datasources.section.source-yahoo-finance-price.range.title=Range +datasources.section.source-yahoo-finance-price.interval.description=The interval between prices queried. +datasources.section.source-yahoo-finance-price.range.description=The range of prices to be queried. +datasources.section.source-yahoo-finance-price.tickers.description=Comma-separated identifiers for the stocks to be queried. Whitespaces are allowed. +datasources.section.source-youtube-analytics.credentials.properties.client_id.title=Client ID +datasources.section.source-youtube-analytics.credentials.properties.client_secret.title=Client Secret +datasources.section.source-youtube-analytics.credentials.properties.refresh_token.title=Refresh Token +datasources.section.source-youtube-analytics.credentials.title=Authenticate via OAuth 2.0 +datasources.section.source-youtube-analytics.credentials.properties.client_id.description=The Client ID of your developer application +datasources.section.source-youtube-analytics.credentials.properties.client_secret.description=The client secret of your developer application +datasources.section.source-youtube-analytics.credentials.properties.refresh_token.description=A refresh token generated using the above client ID and secret +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.title=Refresh Token +datasources.section.source-zendesk-chat.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-zendesk-chat.credentials.oneOf.1.title=Access Token +datasources.section.source-zendesk-chat.credentials.title=Authorization Method +datasources.section.source-zendesk-chat.start_date.title=Start Date +datasources.section.source-zendesk-chat.subdomain.title=Subdomain (Optional) +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests. +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-zendesk-chat.credentials.oneOf.0.properties.refresh_token.description=Refresh Token used to obtain a new Access Token when the current one expires. +datasources.section.source-zendesk-chat.credentials.oneOf.1.properties.access_token.description=The Access Token to make authenticated requests. +datasources.section.source-zendesk-chat.start_date.description=The date from which you'd like to replicate data for Zendesk Chat API, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-zendesk-chat.subdomain.description=Required if you access Zendesk Chat from a Zendesk Support subdomain. 
+datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.title=Client ID +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.title=Client Secret +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-sunshine.credentials.title=Authorization Method +datasources.section.source-zendesk-sunshine.start_date.title=Start Date +datasources.section.source-zendesk-sunshine.subdomain.title=Subdomain +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.access_token.description=Long-term access Token for making authenticated requests. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.api_token.description=API Token. See the docs for information on how to generate this key. +datasources.section.source-zendesk-sunshine.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account +datasources.section.source-zendesk-sunshine.start_date.description=The date from which you'd like to replicate data for Zendesk Sunshine API, in the format YYYY-MM-DDT00:00:00Z. +datasources.section.source-zendesk-sunshine.subdomain.description=The subdomain for your Zendesk Account. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.title=Access Token +datasources.section.source-zendesk-support.credentials.oneOf.0.title=OAuth2.0 +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.title=API Token +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.title=Email +datasources.section.source-zendesk-support.credentials.oneOf.1.title=API Token +datasources.section.source-zendesk-support.credentials.title=Authentication * +datasources.section.source-zendesk-support.start_date.title=Start Date +datasources.section.source-zendesk-support.subdomain.title=Subdomain +datasources.section.source-zendesk-support.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-support.credentials.oneOf.0.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-support.credentials.oneOf.1.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-support.start_date.description=The date from which you'd like to replicate data for Zendesk Support API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. 
+datasources.section.source-zendesk-support.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.title=Email +datasources.section.source-zendesk-talk.credentials.oneOf.0.title=API Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.title=Access Token +datasources.section.source-zendesk-talk.credentials.oneOf.1.title=OAuth2.0 +datasources.section.source-zendesk-talk.credentials.title=Authentication +datasources.section.source-zendesk-talk.start_date.title=Start Date +datasources.section.source-zendesk-talk.subdomain.title=Subdomain +datasources.section.source-zendesk-talk.credentials.description=Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.api_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.credentials.oneOf.0.properties.email.description=The user email for your Zendesk account. +datasources.section.source-zendesk-talk.credentials.oneOf.1.properties.access_token.description=The value of the API token generated. See the docs for more information. +datasources.section.source-zendesk-talk.start_date.description=The date from which you'd like to replicate data for Zendesk Talk API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. +datasources.section.source-zendesk-talk.subdomain.description=This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain. +datasources.section.source-zenloop.api_token.description=Zenloop API Token. You can get the API token in settings page here +datasources.section.source-zenloop.date_from.description=Zenloop date_from. Format: 2021-10-24T03:30:30Z or 2021-10-24. Leave empty if only data from current data should be synced +datasources.section.source-zenloop.survey_group_id.description=Zenloop Survey Group ID. Can be found by pulling All Survey Groups via SurveyGroups stream. Leave empty to pull answers from all survey groups +datasources.section.source-zenloop.survey_id.description=Zenloop Survey ID. Can be found here. Leave empty to pull answers from all surveys +datasources.section.source-zoho-crm.client_id.title=Client ID +datasources.section.source-zoho-crm.client_secret.title=Client Secret +datasources.section.source-zoho-crm.dc_region.title=Data Center Location +datasources.section.source-zoho-crm.edition.title=Zoho CRM Edition +datasources.section.source-zoho-crm.environment.title=Environment +datasources.section.source-zoho-crm.refresh_token.title=Refresh Token +datasources.section.source-zoho-crm.start_datetime.title=Start Date +datasources.section.source-zoho-crm.client_id.description=OAuth2.0 Client ID +datasources.section.source-zoho-crm.client_secret.description=OAuth2.0 Client Secret +datasources.section.source-zoho-crm.dc_region.description=Please choose the region of your Data Center location. 
More info by this Link +datasources.section.source-zoho-crm.edition.description=Choose your Edition of Zoho CRM to determine API Concurrency Limits +datasources.section.source-zoho-crm.environment.description=Please choose the environment +datasources.section.source-zoho-crm.refresh_token.description=OAuth2.0 Refresh Token +datasources.section.source-zoho-crm.start_datetime.description=ISO 8601, for instance: `YYYY-MM-DD`, `YYYY-MM-DD HH:MM:SS+HH:MM` +datasources.section.source-zoom-singer.jwt.title=JWT Token +datasources.section.source-zoom-singer.jwt.description=Zoom JWT Token. See the docs for more information on how to obtain this key. +datasources.section.source-zuora.client_id.title=Client ID +datasources.section.source-zuora.client_secret.title=Client Secret +datasources.section.source-zuora.data_query.title=Data Query Type +datasources.section.source-zuora.start_date.title=Start Date +datasources.section.source-zuora.tenant_endpoint.title=Tenant Endpoint Location +datasources.section.source-zuora.window_in_days.title=Query Window (in days) +datasources.section.source-zuora.client_id.description=Your OAuth user Client ID +datasources.section.source-zuora.client_secret.description=Your OAuth user Client Secret +datasources.section.source-zuora.data_query.description=Choose between `Live`, or `Unlimited` - the optimized, replicated database at 12 hours freshness for high volume extraction Link +datasources.section.source-zuora.start_date.description=Start Date in format: YYYY-MM-DD +datasources.section.source-zuora.tenant_endpoint.description=Please choose the right endpoint where your Tenant is located. More info by this Link +datasources.section.source-zuora.window_in_days.description=The amount of days for each data-chunk begining from start_date. Bigger the value - faster the fetch. (0.1 - as for couple of hours, 1 - as for a Day; 364 - as for a Year). diff --git a/jvm/src/main/resources/airbyte/source-airtable.json b/jvm/src/main/resources/airbyte/source-airtable.json new file mode 100644 index 0000000..0db138f --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-airtable.json @@ -0,0 +1,34 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/airtable", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Airtable Source Spec", + "type": "object", + "required": ["api_key", "base_id", "tables"], + "additionalProperties": false, + "properties": { + "api_key": { + "type": "string", + "description": "The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key.", + "title": "API Key", + "airbyte_secret": true, + "examples": ["key1234567890"] + }, + "base_id": { + "type": "string", + "description": "The Base ID to integrate the data from. 
You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs.", + "title": "Base ID", + "examples": ["app1234567890"] + }, + "tables": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The list of Tables to integrate.", + "title": "Tables", + "examples": ["table 1", "table 2"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-amazon-ads.json b/jvm/src/main/resources/airbyte/source-amazon-ads.json new file mode 100644 index 0000000..cf69708 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-amazon-ads.json @@ -0,0 +1,120 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/amazon-ads", + "connectionSpecification": { + "title": "Amazon Ads Spec", + "type": "object", + "properties": { + "auth_type": { + "title": "Auth Type", + "const": "oauth2.0", + "order": 0, + "type": "string" + }, + "client_id": { + "title": "Client ID", + "description": "The client ID of your Amazon Ads developer application. See the docs for more information.", + "order": 1, + "type": "string" + }, + "client_secret": { + "title": "Client Secret", + "description": "The client secret of your Amazon Ads developer application. See the docs for more information.", + "airbyte_secret": true, + "order": 2, + "type": "string" + }, + "refresh_token": { + "title": "Refresh Token", + "description": "Amazon Ads refresh token. See the docs for more information on how to obtain this token.", + "airbyte_secret": true, + "order": 3, + "type": "string" + }, + "region": { + "title": "Region *", + "description": "Region to pull data from (EU/NA/FE). See docs for more details.", + "enum": ["NA", "EU", "FE"], + "type": "string", + "default": "NA", + "order": 4 + }, + "report_wait_timeout": { + "title": "Report Wait Timeout *", + "description": "Timeout duration in minutes for Reports. Default is 30 minutes.", + "default": 30, + "examples": [30, 120], + "order": 5, + "type": "integer" + }, + "report_generation_max_retries": { + "title": "Report Generation Maximum Retries *", + "description": "Maximum retries Airbyte will attempt for fetching report data. Default is 5.", + "default": 5, + "examples": [5, 10, 15], + "order": 6, + "type": "integer" + }, + "start_date": { + "title": "Start Date (Optional)", + "description": "The Start date for collecting reports, should not be more than 60 days in the past. In YYYY-MM-DD format", + "examples": ["2022-10-10", "2022-10-22"], + "order": 7, + "type": "string" + }, + "profiles": { + "title": "Profile IDs (Optional)", + "description": "Profile IDs you want to fetch data for. 
See docs for more details.", + "order": 8, + "type": "array", + "items": { + "type": "integer" + } + } + }, + "required": ["client_id", "client_secret", "refresh_token"], + "additionalProperties": true + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["auth_type"], + "predicate_value": "oauth2.0", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": true, + "properties": { + "refresh_token": { + "type": "string", + "path_in_connector_config": ["refresh_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": true, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": true, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["client_secret"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-amazon-seller-partner.json b/jvm/src/main/resources/airbyte/source-amazon-seller-partner.json new file mode 100644 index 0000000..ab79387 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-amazon-seller-partner.json @@ -0,0 +1,236 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/amazon-seller-partner", + "changelogUrl": "https://docs.airbyte.io/integrations/sources/amazon-seller-partner", + "connectionSpecification": { + "title": "Amazon Seller Partner Spec", + "type": "object", + "properties": { + "app_id": { + "title": "App Id *", + "description": "Your Amazon App ID", + "airbyte_secret": true, + "order": 0, + "type": "string" + }, + "auth_type": { + "title": "Auth Type", + "const": "oauth2.0", + "order": 1, + "type": "string" + }, + "lwa_app_id": { + "title": "LWA Client Id", + "description": "Your Login with Amazon Client ID.", + "order": 2, + "type": "string" + }, + "lwa_client_secret": { + "title": "LWA Client Secret", + "description": "Your Login with Amazon Client Secret.", + "airbyte_secret": true, + "order": 3, + "type": "string" + }, + "refresh_token": { + "title": "Refresh Token", + "description": "The Refresh Token obtained via OAuth flow authorization.", + "airbyte_secret": true, + "order": 4, + "type": "string" + }, + "aws_access_key": { + "title": "AWS Access Key", + "description": "Specifies the AWS access key used as part of the credentials to authenticate the user.", + "airbyte_secret": true, + "order": 5, + "type": "string" + }, + "aws_secret_key": { + "title": "AWS Secret Access Key", + "description": "Specifies the AWS secret key used as part of the credentials to authenticate the user.", + "airbyte_secret": true, + "order": 6, + "type": "string" + }, + "role_arn": { + "title": "Role ARN", + "description": "Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations requested using this profile. (Needs permission to 'Assume Role' STS).", + "airbyte_secret": true, + "order": 7, + "type": "string" + }, + "replication_start_date": { + "title": "Start Date", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. 
Any data before this date will not be replicated.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2017-01-25T00:00:00Z"], + "type": "string" + }, + "replication_end_date": { + "title": "End Date", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$|^$", + "examples": ["2017-01-25T00:00:00Z"], + "type": "string" + }, + "period_in_days": { + "title": "Period In Days", + "description": "Will be used for stream slicing for initial full_refresh sync when no updated state is present for reports that support sliced incremental sync.", + "default": 30, + "examples": ["30", "365"], + "type": "integer" + }, + "report_options": { + "title": "Report Options", + "description": "Additional information passed to reports. This varies by report type. Must be a valid json string.", + "examples": [ + "{\"GET_BRAND_ANALYTICS_SEARCH_TERMS_REPORT\": {\"reportPeriod\": \"WEEK\"}}", + "{\"GET_SOME_REPORT\": {\"custom\": \"true\"}}" + ], + "type": "string" + }, + "max_wait_seconds": { + "title": "Max wait time for reports (in seconds)", + "description": "Sometimes report can take up to 30 minutes to generate. This will set the limit for how long to wait for a successful report.", + "default": 500, + "examples": ["500", "1980"], + "type": "integer" + }, + "aws_environment": { + "title": "AWSEnvironment", + "description": "An enumeration.", + "enum": ["PRODUCTION", "SANDBOX"], + "type": "string" + }, + "region": { + "title": "AWSRegion", + "description": "An enumeration.", + "enum": [ + "AE", + "AU", + "BR", + "CA", + "DE", + "EG", + "ES", + "FR", + "GB", + "IN", + "IT", + "JP", + "MX", + "NL", + "PL", + "SA", + "SE", + "SG", + "TR", + "UK", + "US" + ], + "type": "string" + } + }, + "required": [ + "lwa_app_id", + "lwa_client_secret", + "refresh_token", + "aws_access_key", + "aws_secret_key", + "role_arn", + "replication_start_date", + "aws_environment", + "region" + ], + "additionalProperties": true, + "definitions": { + "AWSEnvironment": { + "title": "AWSEnvironment", + "description": "An enumeration.", + "enum": ["PRODUCTION", "SANDBOX"], + "type": "string" + }, + "AWSRegion": { + "title": "AWSRegion", + "description": "An enumeration.", + "enum": [ + "AE", + "AU", + "BR", + "CA", + "DE", + "EG", + "ES", + "FR", + "GB", + "IN", + "IT", + "JP", + "MX", + "NL", + "PL", + "SA", + "SE", + "SG", + "TR", + "UK", + "US" + ], + "type": "string" + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["auth_type"], + "predicate_value": "oauth2.0", + "oauth_config_specification": { + "oauth_user_input_from_connector_config_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "app_id": { + "type": "string", + "path_in_connector_config": ["app_id"] + } + } + }, + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "refresh_token": { + "type": "string", + "path_in_connector_config": ["refresh_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "lwa_app_id": { + "type": "string" + }, + "lwa_client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "lwa_app_id": { + "type": "string", + "path_in_connector_config": 
["lwa_app_id"] + }, + "lwa_client_secret": { + "type": "string", + "path_in_connector_config": ["lwa_client_secret"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-amazon-sqs.json b/jvm/src/main/resources/airbyte/source-amazon-sqs.json new file mode 100644 index 0000000..3bddbaf --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-amazon-sqs.json @@ -0,0 +1,105 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/amazon-sqs", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Amazon SQS Source Spec", + "type": "object", + "required": ["queue_url", "region", "delete_messages"], + "additionalProperties": false, + "properties": { + "queue_url": { + "title": "Queue URL", + "description": "URL of the SQS Queue", + "type": "string", + "examples": [ + "https://sqs.eu-west-1.amazonaws.com/1234567890/my-example-queue" + ], + "order": 0 + }, + "region": { + "title": "AWS Region", + "description": "AWS Region of the SQS Queue", + "type": "string", + "enum": [ + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "cn-north-1", + "cn-northwest-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "sa-east-1", + "me-south-1", + "us-gov-east-1", + "us-gov-west-1" + ], + "order": 1 + }, + "delete_messages": { + "title": "Delete Messages After Read", + "description": "If Enabled, messages will be deleted from the SQS Queue after being read. If Disabled, messages are left in the queue and can be read more than once. WARNING: Enabling this option can result in data loss in cases of failure, use with caution, see documentation for more detail. 
", + "type": "boolean", + "default": false, + "order": 2 + }, + "max_batch_size": { + "title": "Max Batch Size", + "description": "Max amount of messages to get in one batch (10 max)", + "type": "integer", + "examples": ["5"], + "order": 3 + }, + "max_wait_time": { + "title": "Max Wait Time", + "description": "Max amount of time in seconds to wait for messages in a single poll (20 max)", + "type": "integer", + "examples": ["5"], + "order": 4 + }, + "attributes_to_return": { + "title": "Message Attributes To Return", + "description": "Comma separated list of Mesage Attribute names to return", + "type": "string", + "examples": ["attr1,attr2"], + "order": 5 + }, + "visibility_timeout": { + "title": "Message Visibility Timeout", + "description": "Modify the Visibility Timeout of the individual message from the Queue's default (seconds).", + "type": "integer", + "examples": ["15"], + "order": 6 + }, + "access_key": { + "title": "AWS IAM Access Key ID", + "description": "The Access Key ID of the AWS IAM Role to use for pulling messages", + "type": "string", + "examples": ["xxxxxHRNxxx3TBxxxxxx"], + "airbyte_secret": true, + "order": 7 + }, + "secret_key": { + "title": "AWS IAM Secret Key", + "description": "The Secret Key of the AWS IAM Role to use for pulling messages", + "type": "string", + "examples": ["hu+qE5exxxxT6o/ZrKsxxxxxxBhxxXLexxxxxVKz"], + "airbyte_secret": true, + "order": 8 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-amplitude.json b/jvm/src/main/resources/airbyte/source-amplitude.json new file mode 100644 index 0000000..4460bdc --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-amplitude.json @@ -0,0 +1,31 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/amplitude", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Amplitude Spec", + "type": "object", + "required": ["api_key", "secret_key", "start_date"], + "additionalProperties": true, + "properties": { + "api_key": { + "type": "string", + "title": "API Key", + "description": "Amplitude API Key. See the setup guide for more information on how to obtain this key.", + "airbyte_secret": true + }, + "secret_key": { + "type": "string", + "title": "Secret Key", + "description": "Amplitude Secret Key. See the setup guide for more information on how to obtain this key.", + "airbyte_secret": true + }, + "start_date": { + "type": "string", + "title": "Replication Start Date", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "description": "UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated.", + "examples": ["2021-01-25T00:00:00Z"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-apify-dataset.json b/jvm/src/main/resources/airbyte/source-apify-dataset.json new file mode 100644 index 0000000..4d8c313 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-apify-dataset.json @@ -0,0 +1,22 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/apify-dataset", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Apify Dataset Spec", + "type": "object", + "required": ["datasetId"], + "additionalProperties": false, + "properties": { + "datasetId": { + "type": "string", + "title": "Dataset ID", + "description": "ID of the dataset you would like to load to Airbyte." 
+ }, + "clean": { + "type": "boolean", + "title": "Clean", + "description": "If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false." + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-appsflyer.json b/jvm/src/main/resources/airbyte/source-appsflyer.json new file mode 100644 index 0000000..6ab830e --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-appsflyer.json @@ -0,0 +1,33 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Appsflyer Spec", + "type": "object", + "required": ["app_id", "api_token", "start_date"], + "additionalProperties": false, + "properties": { + "app_id": { + "type": "string", + "description": "App identifier as found in AppsFlyer." + }, + "api_token": { + "type": "string", + "description": "Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard.", + "airbyte_secret": true + }, + "start_date": { + "type": "string", + "description": "The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days.", + "examples": ["2021-11-16", "2021-11-16 15:00:00"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}( [0-9]{2}:[0-9]{2}:[0-9]{2})?$" + }, + "timezone": { + "type": "string", + "description": "Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console.", + "default": "UTC", + "examples": ["US/Pacific", "UTC"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-appstore-singer.json b/jvm/src/main/resources/airbyte/source-appstore-singer.json new file mode 100644 index 0000000..d104b72 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-appstore-singer.json @@ -0,0 +1,41 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/appstore", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source Appstore Singer Spec", + "type": "object", + "required": ["key_id", "private_key", "issuer_id", "vendor", "start_date"], + "additionalProperties": false, + "properties": { + "key_id": { + "type": "string", + "title": "Key ID", + "description": "Appstore Key ID. See the docs for more information on how to obtain this key." + }, + "private_key": { + "type": "string", + "title": "Private Key", + "description": "Appstore Private Key. See the docs for more information on how to obtain this key.", + "airbyte_secret": true, + "multiline": true + }, + "issuer_id": { + "type": "string", + "title": "Issuer ID", + "description": "Appstore Issuer ID. See the docs for more information on how to obtain this ID." + }, + "vendor": { + "type": "string", + "title": "Vendor ID", + "description": "Appstore Vendor ID. See the docs for more information on how to obtain this ID." + }, + "start_date": { + "type": "string", + "title": "Start Date", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. 
Any data before this date will not be replicated.", + "examples": ["2020-11-16T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-asana.json b/jvm/src/main/resources/airbyte/source-asana.json new file mode 100644 index 0000000..83abe72 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-asana.json @@ -0,0 +1,76 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Asana Spec", + "type": "object", + "additionalProperties": true, + "properties": { + "credentials": { + "title": "Authentication mechanism", + "description": "Choose how to authenticate to Github", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "Authenticate with Personal Access Token", + "required": ["personal_access_token"], + "properties": { + "option_title": { + "type": "string", + "title": "Credentials title", + "description": "PAT Credentials", + "const": "PAT Credentials" + }, + "personal_access_token": { + "type": "string", + "title": "Personal Access Token", + "description": "Asana Personal Access Token (generate yours here).", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "Authenticate via Asana (Oauth)", + "required": ["client_id", "client_secret", "refresh_token"], + "properties": { + "option_title": { + "type": "string", + "title": "Credentials title", + "description": "OAuth Credentials", + "const": "OAuth Credentials" + }, + "client_id": { + "type": "string", + "title": "", + "description": "", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "", + "description": "", + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "title": "", + "description": "", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": ["credentials", 1], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["refresh_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-aws-cloudtrail.json b/jvm/src/main/resources/airbyte/source-aws-cloudtrail.json new file mode 100644 index 0000000..8489e44 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-aws-cloudtrail.json @@ -0,0 +1,42 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/aws-cloudtrail", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Aws CloudTrail Spec", + "type": "object", + "required": [ + "aws_key_id", + "aws_secret_key", + "aws_region_name", + "start_date" + ], + "additionalProperties": true, + "properties": { + "aws_key_id": { + "type": "string", + "title": "Key ID", + "description": "AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key.", + "airbyte_secret": true + }, + "aws_secret_key": { + "type": "string", + "title": "Secret Key", + "description": "AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key.", + "airbyte_secret": true + }, + "aws_region_name": { + "type": "string", + "title": "Region Name", + "description": "The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name." 
+ }, + "start_date": { + "type": "string", + "title": "Start Date", + "description": "The date you would like to replicate data. Data in AWS CloudTrail is available for last 90 days only. Format: YYYY-MM-DD.", + "examples": ["2021-01-01"], + "default": "1970-01-01", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-azure-table.json b/jvm/src/main/resources/airbyte/source-azure-table.json new file mode 100644 index 0000000..31b922d --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-azure-table.json @@ -0,0 +1,35 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Azure Data Table Spec", + "type": "object", + "required": ["storage_account_name", "storage_access_key"], + "additionalProperties": false, + "properties": { + "storage_account_name": { + "title": "Account Name", + "type": "string", + "description": "The name of your storage account.", + "order": 0, + "airbyte_secret": false + }, + "storage_access_key": { + "title": "Access Key", + "type": "string", + "description": "Azure Table Storage Access Key. See the docs for more information on how to obtain this key.", + "order": 1, + "airbyte_secret": true + }, + "storage_endpoint_suffix": { + "title": "Endpoint Suffix", + "type": "string", + "description": "Azure Table Storage service account URL suffix. See the docs for more information on how to obtain endpoint suffix", + "order": 2, + "default": "core.windows.net", + "examples": ["core.windows.net", "core.chinacloudapi.cn"], + "airbyte_secret": false + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-bamboo-hr.json b/jvm/src/main/resources/airbyte/source-bamboo-hr.json new file mode 100644 index 0000000..e74378a --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-bamboo-hr.json @@ -0,0 +1,31 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/bamboo-hr", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Bamboo HR Spec", + "type": "object", + "required": ["subdomain", "api_key"], + "additionalProperties": false, + "properties": { + "subdomain": { + "type": "string", + "description": "Sub Domain of bamboo hr" + }, + "api_key": { + "type": "string", + "description": "Api key of bamboo hr", + "airbyte_secret": true + }, + "custom_reports_fields": { + "type": "string", + "default": "", + "description": "Comma-separated list of fields to include in custom reports." + }, + "custom_reports_include_default_fields": { + "type": "boolean", + "default": true, + "description": "If true, the custom reports endpoint will include the default fields defined here: https://documentation.bamboohr.com/docs/list-of-field-names." 
+ } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-bigcommerce.json b/jvm/src/main/resources/airbyte/source-bigcommerce.json new file mode 100644 index 0000000..9cc4bc6 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-bigcommerce.json @@ -0,0 +1,30 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/bigcommerce", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "BigCommerce Source CDK Specifications", + "type": "object", + "required": ["start_date", "store_hash", "access_token"], + "additionalProperties": true, + "properties": { + "start_date": { + "type": "string", + "title": "Start Date", + "description": "The date you would like to replicate data. Format: YYYY-MM-DD.", + "examples": ["2021-01-01"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$" + }, + "store_hash": { + "type": "string", + "title": "Store Hash", + "description": "The hash code of the store. For https://api.bigcommerce.com/stores/HASH_CODE/v3/, The store's hash code is 'HASH_CODE'." + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "Access Token for making authenticated requests.", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-bigquery.json b/jvm/src/main/resources/airbyte/source-bigquery.json new file mode 100644 index 0000000..13185cc --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-bigquery.json @@ -0,0 +1,31 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/bigquery", + "supportsIncremental": true, + "supportsNormalization": true, + "supportsDBT": true, + "supported_sync_modes": ["overwrite", "append", "append_dedup"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "BigQuery Source Spec", + "type": "object", + "required": ["project_id", "credentials_json"], + "properties": { + "project_id": { + "type": "string", + "description": "The GCP project ID for the project containing the target BigQuery dataset.", + "title": "Project ID" + }, + "dataset_id": { + "type": "string", + "description": "The dataset ID to search for tables and views. If you are only loading data from one dataset, setting this option could result in much faster schema discovery.", + "title": "Default Dataset ID" + }, + "credentials_json": { + "type": "string", + "description": "The contents of your Service Account Key JSON file. See the docs for more information on how to obtain this key.", + "title": "Credentials JSON", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-bing-ads.json b/jvm/src/main/resources/airbyte/source-bing-ads.json new file mode 100644 index 0000000..7807377 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-bing-ads.json @@ -0,0 +1,119 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/bing-ads", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Bing Ads Spec", + "type": "object", + "required": [ + "developer_token", + "client_id", + "refresh_token", + "reports_start_date" + ], + "additionalProperties": true, + "properties": { + "auth_method": { + "type": "string", + "const": "oauth2.0" + }, + "tenant_id": { + "type": "string", + "title": "Tenant ID", + "description": "The Tenant ID of your Microsoft Advertising developer application. 
Set this to \"common\" unless you know you need a different value.", + "airbyte_secret": true, + "default": "common", + "order": 0 + }, + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The Client ID of your Microsoft Advertising developer application.", + "airbyte_secret": true, + "order": 1 + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "The Client Secret of your Microsoft Advertising developer application.", + "default": "", + "airbyte_secret": true, + "order": 2 + }, + "refresh_token": { + "type": "string", + "title": "Refresh Token", + "description": "Refresh Token to renew the expired Access Token.", + "airbyte_secret": true, + "order": 3 + }, + "developer_token": { + "type": "string", + "title": "Developer Token", + "description": "Developer token associated with user. See more info in the docs.", + "airbyte_secret": true, + "order": 4 + }, + "reports_start_date": { + "type": "string", + "title": "Reports replication start date", + "format": "date", + "default": "2020-01-01", + "description": "The start date from which to begin replicating report data. Any data generated before this date will not be replicated in reports. This is a UTC date in YYYY-MM-DD format.", + "order": 5 + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["auth_method"], + "predicate_value": "oauth2.0", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "refresh_token": { + "type": "string", + "path_in_connector_config": ["refresh_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["client_secret"] + } + } + }, + "oauth_user_input_from_connector_config_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "tenant_id": { + "type": "string", + "path_in_connector_config": ["tenant_id"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-braintree.json b/jvm/src/main/resources/airbyte/source-braintree.json new file mode 100644 index 0000000..4fd243e --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-braintree.json @@ -0,0 +1,45 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/braintree", + "connectionSpecification": { + "title": "Braintree Spec", + "type": "object", + "properties": { + "merchant_id": { + "title": "Merchant ID", + "description": "The unique identifier for your entire gateway account. See the docs for more information on how to obtain this ID.", + "name": "Merchant ID", + "type": "string" + }, + "public_key": { + "title": "Public Key", + "description": "Braintree Public Key. See the docs for more information on how to obtain this key.", + "name": "Public Key", + "type": "string" + }, + "private_key": { + "title": "Private Key", + "description": "Braintree Private Key. 
See the docs for more information on how to obtain this key.", + "name": "Private Key", + "airbyte_secret": true, + "type": "string" + }, + "start_date": { + "title": "Start Date", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.", + "name": "Start Date", + "examples": ["2020", "2020-12-30", "2020-11-22 20:20:05"], + "type": "string", + "format": "date-time" + }, + "environment": { + "title": "Environment", + "description": "Environment specifies where the data will come from.", + "name": "Environment", + "examples": ["sandbox", "production", "qa", "development"], + "enum": ["Development", "Sandbox", "Qa", "Production"], + "type": "string" + } + }, + "required": ["merchant_id", "public_key", "private_key", "environment"] + } +} diff --git a/jvm/src/main/resources/airbyte/source-cart.json b/jvm/src/main/resources/airbyte/source-cart.json new file mode 100644 index 0000000..1d7fb81 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-cart.json @@ -0,0 +1,30 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/cart", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Cart.com Spec", + "type": "object", + "required": ["access_token", "start_date", "store_name"], + "additionalProperties": true, + "properties": { + "access_token": { + "type": "string", + "title": "Access Token", + "airbyte_secret": true, + "description": "Access Token for making authenticated requests." + }, + "store_name": { + "type": "string", + "title": "Store Name", + "description": "The name of Cart.com Online Store. All API URLs start with https://[mystorename.com]/api/v1/, where [mystorename.com] is the domain name of your store." + }, + "start_date": { + "title": "Start Date", + "type": "string", + "description": "The date from which you'd like to replicate the data", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2021-01-01T00:00:00Z"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-chargebee.json b/jvm/src/main/resources/airbyte/source-chargebee.json new file mode 100644 index 0000000..33ece30 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-chargebee.json @@ -0,0 +1,38 @@ +{ + "documentationUrl": "https://apidocs.chargebee.com/docs/api", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Chargebee Spec", + "type": "object", + "required": ["site", "site_api_key", "start_date", "product_catalog"], + "additionalProperties": true, + "properties": { + "site": { + "type": "string", + "title": "Site", + "description": "The site prefix for your Chargebee instance.", + "examples": ["airbyte-test"] + }, + "site_api_key": { + "type": "string", + "title": "API Key", + "description": "Chargebee API Key. See the docs for more information on how to obtain this key.", + "examples": ["test_3yzfanAXF66USdWC9wQcM555DQJkSYoppu"], + "airbyte_secret": true + }, + "start_date": { + "type": "string", + "title": "Start Date", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "description": "UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated.", + "examples": ["2021-01-25T00:00:00Z"] + }, + "product_catalog": { + "title": "Product Catalog", + "type": "string", + "description": "Product Catalog version of your Chargebee site. 
Instructions on how to find your version you may find here under `API Version` section.", + "enum": ["1.0", "2.0"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-chargify.json b/jvm/src/main/resources/airbyte/source-chargify.json new file mode 100644 index 0000000..4d77ed7 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-chargify.json @@ -0,0 +1,21 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/chargify", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Chargify Spec", + "type": "object", + "required": ["api_key", "domain"], + "additionalProperties": false, + "properties": { + "api_key": { + "type": "string", + "description": "Chargify API Key.", + "airbyte_secret": true + }, + "domain": { + "type": "string", + "description": "Chargify domain. Normally this domain follows the following format companyname.chargify.com" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-chartmogul.json b/jvm/src/main/resources/airbyte/source-chartmogul.json new file mode 100644 index 0000000..ff3d92d --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-chartmogul.json @@ -0,0 +1,32 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/chartmogul", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Chartmogul Spec", + "type": "object", + "required": ["api_key", "start_date", "interval"], + "additionalProperties": false, + "properties": { + "api_key": { + "type": "string", + "description": "Chartmogul API key", + "airbyte_secret": true, + "order": 0 + }, + "start_date": { + "type": "string", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. 
When feasible, any data before this date will not be replicated.", + "examples": ["2017-01-25T00:00:00Z"], + "order": 1 + }, + "interval": { + "type": "string", + "description": "Some APIs such as Metrics require intervals to cluster data.", + "enum": ["day", "week", "month", "quarter"], + "default": "month", + "order": 2 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-clickhouse.json b/jvm/src/main/resources/airbyte/source-clickhouse.json new file mode 100644 index 0000000..7b28b58 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-clickhouse.json @@ -0,0 +1,48 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/clickhouse", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ClickHouse Source Spec", + "type": "object", + "required": ["host", "port", "database", "username"], + "properties": { + "host": { + "description": "The host endpoint of the Clickhouse cluster.", + "title": "Host", + "type": "string" + }, + "port": { + "description": "The port of the database.", + "title": "Port", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 8123, + "examples": ["8123"] + }, + "database": { + "description": "The name of the database.", + "title": "Database", + "type": "string", + "examples": ["default"] + }, + "username": { + "description": "The username which is used to access the database.", + "title": "Username", + "type": "string" + }, + "password": { + "description": "The password associated with this username.", + "title": "Password", + "type": "string", + "airbyte_secret": true + }, + "ssl": { + "title": "SSL Connection", + "description": "Encrypt data using SSL.", + "type": "boolean", + "default": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-close-com.json b/jvm/src/main/resources/airbyte/source-close-com.json new file mode 100644 index 0000000..2c067de --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-close-com.json @@ -0,0 +1,24 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/close-com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Close.com Spec", + "type": "object", + "required": ["api_key"], + "additionalProperties": false, + "properties": { + "api_key": { + "type": "string", + "description": "Close.com API key (usually starts with 'api_'; find yours here).", + "airbyte_secret": true + }, + "start_date": { + "type": "string", + "description": "The start date to sync data. Leave blank for full sync. 
Format: YYYY-MM-DD.", + "examples": ["2021-01-01"], + "default": "2021-01-01", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-cockroachdb.json b/jvm/src/main/resources/airbyte/source-cockroachdb.json new file mode 100644 index 0000000..68e798f --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-cockroachdb.json @@ -0,0 +1,59 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/cockroachdb", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Cockroach Source Spec", + "type": "object", + "required": ["host", "port", "database", "username"], + "properties": { + "host": { + "title": "Host", + "description": "Hostname of the database.", + "type": "string", + "order": 0 + }, + "port": { + "title": "Port", + "description": "Port of the database.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 5432, + "examples": ["5432"], + "order": 1 + }, + "database": { + "title": "DB Name", + "description": "Name of the database.", + "type": "string", + "order": 2 + }, + "username": { + "title": "User", + "description": "Username to use to access the database.", + "type": "string", + "order": 3 + }, + "password": { + "title": "Password", + "description": "Password associated with the username.", + "type": "string", + "airbyte_secret": true, + "order": 4 + }, + "jdbc_url_params": { + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.", + "title": "JDBC URL Parameters (Advanced)", + "type": "string", + "order": 5 + }, + "ssl": { + "title": "Connect using SSL", + "description": "Encrypt client/server communications for increased security.", + "type": "boolean", + "default": false, + "order": 6 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-commercetools.json b/jvm/src/main/resources/airbyte/source-commercetools.json new file mode 100644 index 0000000..f70c16d --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-commercetools.json @@ -0,0 +1,49 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/commercetools", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Commercetools Source CDK Specifications", + "type": "object", + "required": [ + "region", + "start_date", + "host", + "project_key", + "client_id", + "client_secret" + ], + "additionalProperties": false, + "properties": { + "region": { + "type": "string", + "description": "The region of the platform.", + "examples": ["us-central1", "australia-southeast1"] + }, + "host": { + "type": "string", + "enum": ["gcp", "aws"], + "description": "The cloud provider your shop is hosted. See: https://docs.commercetools.com/api/authorization" + }, + "start_date": { + "type": "string", + "description": "The date you would like to replicate data. 
Format: YYYY-MM-DD.", + "examples": ["2021-01-01"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$" + }, + "project_key": { + "type": "string", + "description": "The project key" + }, + "client_id": { + "type": "string", + "description": "Id of API Client.", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "description": "The password of secret of API Client.", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-confluence.json b/jvm/src/main/resources/airbyte/source-confluence.json new file mode 100644 index 0000000..136bd0b --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-confluence.json @@ -0,0 +1,27 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Confluence Spec", + "type": "object", + "required": ["api_token", "domain_name", "email"], + "additionalProperties": false, + "properties": { + "api_token": { + "type": "string", + "description": "Please follow the Jira confluence for generating an API token: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/", + "airbyte_secret": true + }, + "domain_name": { + "type": "string", + "description": "Your Confluence domain name", + "examples": ["example.atlassian.net"] + }, + "email": { + "type": "string", + "description": "Your Confluence login email", + "examples": ["abc@example.com"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-db2.json b/jvm/src/main/resources/airbyte/source-db2.json new file mode 100644 index 0000000..f964814 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-db2.json @@ -0,0 +1,95 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/db2", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "IBM Db2 Source Spec", + "type": "object", + "required": ["host", "port", "db", "username", "password", "encryption"], + "properties": { + "host": { + "description": "Host of the Db2.", + "type": "string", + "order": 0 + }, + "port": { + "description": "Port of the database.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 8123, + "examples": ["8123"], + "order": 1 + }, + "db": { + "description": "Name of the database.", + "type": "string", + "examples": ["default"], + "order": 2 + }, + "username": { + "description": "Username to use to access the database.", + "type": "string", + "order": 3 + }, + "password": { + "description": "Password associated with the username.", + "type": "string", + "airbyte_secret": true, + "order": 4 + }, + "jdbc_url_params": { + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. 
(example: key1=value1&key2=value2&key3=value3).", + "title": "JDBC URL Params", + "type": "string", + "order": 5 + }, + "encryption": { + "title": "Encryption", + "type": "object", + "description": "Encryption method to use when communicating with the database", + "order": 6, + "oneOf": [ + { + "title": "Unencrypted", + "description": "Data transfer will not be encrypted.", + "required": ["encryption_method"], + "properties": { + "encryption_method": { + "type": "string", + "const": "unencrypted", + "enum": ["unencrypted"], + "default": "unencrypted" + } + } + }, + { + "title": "TLS Encrypted (verify certificate)", + "description": "Verify and use the cert provided by the server.", + "required": ["encryption_method", "ssl_certificate"], + "properties": { + "encryption_method": { + "type": "string", + "const": "encrypted_verify_certificate", + "enum": ["encrypted_verify_certificate"], + "default": "encrypted_verify_certificate" + }, + "ssl_certificate": { + "title": "SSL PEM file", + "description": "Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations", + "type": "string", + "airbyte_secret": true, + "multiline": true + }, + "key_store_password": { + "title": "Key Store Password. This field is optional. If you do not fill in this field, the password will be randomly generated.", + "description": "Key Store Password", + "type": "string", + "airbyte_secret": true + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-delighted.json b/jvm/src/main/resources/airbyte/source-delighted.json new file mode 100644 index 0000000..0292ddf --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-delighted.json @@ -0,0 +1,27 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Delighted Spec", + "type": "object", + "required": ["since", "api_key"], + "additionalProperties": false, + "properties": { + "since": { + "title": "Since", + "type": "string", + "description": "The date from which you'd like to replicate the data", + "examples": ["2022-05-30 04:50:23"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2} ([0-9]{2}:[0-9]{2}:[0-9]{2})?$", + "order": 0 + }, + "api_key": { + "title": "Delighted API Key", + "type": "string", + "description": "A Delighted API key.", + "airbyte_secret": true, + "order": 1 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-dixa.json b/jvm/src/main/resources/airbyte/source-dixa.json new file mode 100644 index 0000000..f15e4a6 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-dixa.json @@ -0,0 +1,30 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/dixa", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Dixa Spec", + "type": "object", + "required": ["api_token", "start_date"], + "additionalProperties": false, + "properties": { + "api_token": { + "type": "string", + "description": "Dixa API token", + "airbyte_secret": true + }, + "start_date": { + "type": "string", + "description": "The connector pulls records updated from this date onwards.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$", + "examples": ["YYYY-MM-DD"] + }, + "batch_size": { + "type": "integer", + "description": "Number of days to batch into one request. 
Max 31.", + "pattern": "^[0-9]{1,2}$", + "examples": [1, 31], + "default": 31 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-drift.json b/jvm/src/main/resources/airbyte/source-drift.json new file mode 100644 index 0000000..3b1ad1f --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-drift.json @@ -0,0 +1,90 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/drift", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Drift Spec", + "type": "object", + "required": [], + "additionalProperties": true, + "properties": { + "credentials": { + "title": "Authorization Method", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "OAuth2.0", + "required": [ + "client_id", + "client_secret", + "access_token", + "refresh_token" + ], + "properties": { + "credentials": { + "type": "string", + "const": "oauth2.0", + "enum": ["oauth2.0"], + "default": "oauth2.0", + "order": 0 + }, + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The Client ID of your Drift developer application.", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "The Client Secret of your Drift developer application.", + "airbyte_secret": true + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "Access Token for making authenticated requests.", + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "title": "Refresh Token", + "description": "Refresh Token to renew the expired Access Token.", + "default": "", + "airbyte_secret": true + } + } + }, + { + "title": "Access Token", + "type": "object", + "required": ["access_token"], + "properties": { + "credentials": { + "type": "string", + "const": "access_token", + "enum": ["access_token"], + "default": "access_token", + "order": 0 + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "Drift Access Token. 
See the docs for more information on how to generate this key.", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": ["credentials", "0"], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["access_token"], ["refresh_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-elasticsearch.json b/jvm/src/main/resources/airbyte/source-elasticsearch.json new file mode 100644 index 0000000..a2b88df --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-elasticsearch.json @@ -0,0 +1,82 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/source/elasticsearch", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Elasticsearch Connection Configuration", + "type": "object", + "required": ["endpoint"], + "additionalProperties": false, + "properties": { + "endpoint": { + "title": "Server Endpoint", + "type": "string", + "description": "The full url of the Elasticsearch server" + }, + "authenticationMethod": { + "title": "Authentication Method", + "type": "object", + "description": "The type of authentication to be used", + "oneOf": [ + { + "title": "None", + "additionalProperties": false, + "description": "No authentication will be used", + "required": ["method"], + "properties": { + "method": { + "type": "string", + "const": "none" + } + } + }, + { + "title": "Api Key/Secret", + "additionalProperties": false, + "description": "Use a api key and secret combination to authenticate", + "required": ["method", "apiKeyId", "apiKeySecret"], + "properties": { + "method": { + "type": "string", + "const": "secret" + }, + "apiKeyId": { + "title": "API Key ID", + "description": "The Key ID to used when accessing an enterprise Elasticsearch instance.", + "type": "string" + }, + "apiKeySecret": { + "title": "API Key Secret", + "description": "The secret associated with the API Key ID.", + "type": "string", + "airbyte_secret": true + } + } + }, + { + "title": "Username/Password", + "additionalProperties": false, + "description": "Basic auth header with a username and password", + "required": ["method", "username", "password"], + "properties": { + "method": { + "type": "string", + "const": "basic" + }, + "username": { + "title": "Username", + "description": "Basic auth username to access a secure Elasticsearch server", + "type": "string" + }, + "password": { + "title": "Password", + "description": "Basic auth password to access a secure Elasticsearch server", + "type": "string", + "airbyte_secret": true + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-facebook-marketing.json b/jvm/src/main/resources/airbyte/source-facebook-marketing.json new file mode 100644 index 0000000..706eae4 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-facebook-marketing.json @@ -0,0 +1,345 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/facebook-marketing", + "changelogUrl": "https://docs.airbyte.io/integrations/sources/facebook-marketing", + "connectionSpecification": { + "title": "Source Facebook Marketing", + "type": "object", + "properties": { + "account_id": { + "title": "Account ID", + "description": "The Facebook Ad account ID to use when pulling data from the Facebook Marketing API.", + "order": 0, + "examples": ["111111111111111"], + "type": "string" + }, + "start_date": { + "title": "Start Date", + "description": "The date from which you'd like 
to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.", + "order": 1, + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2017-01-25T00:00:00Z"], + "type": "string", + "format": "date-time" + }, + "end_date": { + "title": "End Date", + "description": "The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the latest data.", + "order": 2, + "pattern": "^$|^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2017-01-26T00:00:00Z"], + "type": "string", + "format": "date-time" + }, + "access_token": { + "title": "Access Token", + "description": "The value of the access token generated. See the docs for more information", + "order": 3, + "airbyte_secret": true, + "type": "string" + }, + "include_deleted": { + "title": "Include Deleted", + "description": "Include data from deleted Campaigns, Ads, and AdSets", + "default": false, + "order": 4, + "type": "boolean" + }, + "fetch_thumbnail_images": { + "title": "Fetch Thumbnail Images", + "description": "In each Ad Creative, fetch the thumbnail_url and store the result in thumbnail_data_url", + "default": false, + "order": 5, + "type": "boolean" + }, + "custom_insights": { + "title": "Custom Insights", + "description": "A list which contains insights entries, each entry must have a name and can contains fields, breakdowns or action_breakdowns)", + "order": 6, + "type": "array", + "items": { + "title": "InsightConfig", + "description": "Config for custom insights", + "type": "object", + "properties": { + "name": { + "title": "Name", + "description": "The name value of insight", + "type": "string" + }, + "fields": { + "title": "Fields", + "description": "A list of chosen fields for fields parameter", + "default": [], + "type": "array", + "items": { + "title": "ValidEnums", + "description": "Generic enumeration.\n\nDerive from this class to define new enumerations.", + "enum": [ + "account_currency", + "account_id", + "account_name", + "action_values", + "actions", + "ad_bid_value", + "ad_click_actions", + "ad_id", + "ad_impression_actions", + "ad_name", + "adset_bid_value", + "adset_end", + "adset_id", + "adset_name", + "adset_start", + "age_targeting", + "attribution_setting", + "auction_bid", + "auction_competitiveness", + "auction_max_competitor_bid", + "buying_type", + "campaign_id", + "campaign_name", + "canvas_avg_view_percent", + "canvas_avg_view_time", + "catalog_segment_actions", + "catalog_segment_value", + "catalog_segment_value_mobile_purchase_roas", + "catalog_segment_value_omni_purchase_roas", + "catalog_segment_value_website_purchase_roas", + "clicks", + "conversion_rate_ranking", + "conversion_values", + "conversions", + "converted_product_quantity", + "converted_product_value", + "cost_per_15_sec_video_view", + "cost_per_2_sec_continuous_video_view", + "cost_per_action_type", + "cost_per_ad_click", + "cost_per_conversion", + "cost_per_dda_countby_convs", + "cost_per_estimated_ad_recallers", + "cost_per_inline_link_click", + "cost_per_inline_post_engagement", + "cost_per_one_thousand_ad_impression", + "cost_per_outbound_click", + "cost_per_thruplay", + "cost_per_unique_action_type", + "cost_per_unique_click", + "cost_per_unique_conversion", + "cost_per_unique_inline_link_click", + 
"cost_per_unique_outbound_click", + "cpc", + "cpm", + "cpp", + "created_time", + "ctr", + "date_start", + "date_stop", + "dda_countby_convs", + "dda_results", + "engagement_rate_ranking", + "estimated_ad_recall_rate", + "estimated_ad_recall_rate_lower_bound", + "estimated_ad_recall_rate_upper_bound", + "estimated_ad_recallers", + "estimated_ad_recallers_lower_bound", + "estimated_ad_recallers_upper_bound", + "frequency", + "full_view_impressions", + "full_view_reach", + "gender_targeting", + "impressions", + "inline_link_click_ctr", + "inline_link_clicks", + "inline_post_engagement", + "instant_experience_clicks_to_open", + "instant_experience_clicks_to_start", + "instant_experience_outbound_clicks", + "interactive_component_tap", + "labels", + "location", + "mobile_app_purchase_roas", + "objective", + "optimization_goal", + "outbound_clicks", + "outbound_clicks_ctr", + "place_page_name", + "purchase_roas", + "qualifying_question_qualify_answer_rate", + "quality_ranking", + "quality_score_ectr", + "quality_score_ecvr", + "quality_score_organic", + "reach", + "social_spend", + "spend", + "total_postbacks", + "unique_actions", + "unique_clicks", + "unique_conversions", + "unique_ctr", + "unique_inline_link_click_ctr", + "unique_inline_link_clicks", + "unique_link_clicks_ctr", + "unique_outbound_clicks", + "unique_outbound_clicks_ctr", + "unique_video_continuous_2_sec_watched_actions", + "unique_video_view_15_sec", + "updated_time", + "video_15_sec_watched_actions", + "video_30_sec_watched_actions", + "video_avg_time_watched_actions", + "video_continuous_2_sec_watched_actions", + "video_p100_watched_actions", + "video_p25_watched_actions", + "video_p50_watched_actions", + "video_p75_watched_actions", + "video_p95_watched_actions", + "video_play_actions", + "video_play_curve_actions", + "video_play_retention_0_to_15s_actions", + "video_play_retention_20_to_60s_actions", + "video_play_retention_graph_actions", + "video_thruplay_watched_actions", + "video_time_watched_actions", + "website_ctr", + "website_purchase_roas", + "wish_bid" + ] + } + }, + "breakdowns": { + "title": "Breakdowns", + "description": "A list of chosen breakdowns for breakdowns", + "default": [], + "type": "array", + "items": { + "title": "ValidBreakdowns", + "description": "Generic enumeration.\n\nDerive from this class to define new enumerations.", + "enum": [ + "ad_format_asset", + "age", + "app_id", + "body_asset", + "call_to_action_asset", + "country", + "description_asset", + "device_platform", + "dma", + "frequency_value", + "gender", + "hourly_stats_aggregated_by_advertiser_time_zone", + "hourly_stats_aggregated_by_audience_time_zone", + "image_asset", + "impression_device", + "link_url_asset", + "place_page_id", + "platform_position", + "product_id", + "publisher_platform", + "region", + "skan_conversion_id", + "title_asset", + "video_asset" + ] + } + }, + "action_breakdowns": { + "title": "Action Breakdowns", + "description": "A list of chosen action_breakdowns for action_breakdowns", + "default": [], + "type": "array", + "items": { + "title": "ValidActionBreakdowns", + "description": "Generic enumeration.\n\nDerive from this class to define new enumerations.", + "enum": [ + "action_canvas_component_name", + "action_carousel_card_id", + "action_carousel_card_name", + "action_destination", + "action_device", + "action_reaction", + "action_target_id", + "action_type", + "action_video_sound", + "action_video_type" + ] + } + }, + "time_increment": { + "title": "Time Increment", + "description": "Time window in days by 
which to aggregate statistics. The sync will be chunked into N day intervals, where N is the number of days you specified. For example, if you set this value to 7, then all statistics will be reported as 7-day aggregates by starting from the start_date. If the start and end dates are October 1st and October 30th, then the connector will output 5 records: 01 - 06, 07 - 13, 14 - 20, 21 - 27, and 28 - 30 (3 days only).", + "default": 1, + "exclusiveMaximum": 90, + "exclusiveMinimum": 0, + "type": "integer" + }, + "start_date": { + "title": "Start Date", + "description": "The date from which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2017-01-25T00:00:00Z"], + "type": "string", + "format": "date-time" + }, + "end_date": { + "title": "End Date", + "description": "The date until which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. All data generated between the start date and this date will be replicated. Not setting this option will result in always syncing the latest data.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2017-01-26T00:00:00Z"], + "type": "string", + "format": "date-time" + }, + "insights_lookback_window": { + "title": "Custom Insights Lookback Window", + "description": "The attribution window", + "default": 28, + "maximum": 28, + "mininum": 1, + "exclusiveMinimum": 0, + "type": "integer" + } + }, + "required": ["name"] + } + }, + "page_size": { + "title": "Page Size of Requests", + "description": "Page size used when sending requests to Facebook API to specify number of records per page when response has pagination. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases.", + "default": 100, + "order": 7, + "exclusiveMinimum": 0, + "type": "integer" + }, + "insights_lookback_window": { + "title": "Insights Lookback Window", + "description": "The attribution window", + "default": 28, + "order": 8, + "maximum": 28, + "mininum": 1, + "exclusiveMinimum": 0, + "type": "integer" + }, + "max_batch_size": { + "title": "Maximum size of Batched Requests", + "description": "Maximum batch size used when sending batch requests to Facebook API. 
Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases.", + "default": 50, + "order": 9, + "exclusiveMinimum": 0, + "type": "integer" + } + }, + "required": ["account_id", "start_date", "access_token"] + }, + "supportsIncremental": true, + "supported_destination_sync_modes": ["append"], + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": [], + "oauthFlowInitParameters": [], + "oauthFlowOutputParameters": [["access_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-facebook-pages.json b/jvm/src/main/resources/airbyte/source-facebook-pages.json new file mode 100755 index 0000000..0320a9f --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-facebook-pages.json @@ -0,0 +1,31 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/facebook-pages", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Facebook Pages Spec", + "type": "object", + "required": ["access_token", "page_id"], + "additionalProperties": true, + "properties": { + "access_token": { + "type": "string", + "title": "Page Access Token", + "description": "Facebook Page Access Token", + "airbyte_secret": true + }, + "page_id": { + "type": "string", + "title": "Page ID", + "description": "Page ID" + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": [], + "oauthFlowInitParameters": [], + "oauthFlowOutputParameters": [["access_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-faker.json b/jvm/src/main/resources/airbyte/source-faker.json new file mode 100644 index 0000000..c056a36 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-faker.json @@ -0,0 +1,43 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/faker", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Faker Source Spec", + "type": "object", + "required": ["count"], + "additionalProperties": false, + "properties": { + "count": { + "title": "Count", + "description": "How many users should be generated in total. This setting does not apply to the purchases or products stream.", + "type": "integer", + "minimum": 1, + "default": 1000, + "order": 0 + }, + "seed": { + "title": "Seed", + "description": "Manually control the faker random seed to return the same values on subsequent runs (leave -1 for random)", + "type": "integer", + "default": -1, + "order": 1 + }, + "records_per_sync": { + "title": "Records Per Sync", + "description": "How many fake records will be returned for each sync, for each stream? 
By default, it will take 2 syncs to create the requested 1000 records.", + "type": "integer", + "minimum": 1, + "default": 500, + "order": 2 + }, + "records_per_slice": { + "title": "Records Per Stream Slice", + "description": "How many fake records will be in each page (stream slice), before a state message is emitted?", + "type": "integer", + "minimum": 1, + "default": 100, + "order": 3 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-file-secure.json b/jvm/src/main/resources/airbyte/source-file-secure.json new file mode 100644 index 0000000..9dc5be7 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-file-secure.json @@ -0,0 +1,224 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/file", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "File Source Spec", + "type": "object", + "additionalProperties": true, + "required": ["dataset_name", "format", "url", "provider"], + "properties": { + "dataset_name": { + "type": "string", + "title": "Dataset Name", + "description": "The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only)." + }, + "format": { + "type": "string", + "enum": ["csv", "json", "jsonl", "excel", "excel_binary", "feather", "parquet", "yaml"], + "default": "csv", + "title": "File Format", + "description": "The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs)." + }, + "reader_options": { + "type": "string", + "title": "Reader Options", + "description": "This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior.", + "examples": ["{}", "{\"sep\": \" \"}"] + }, + "url": { + "type": "string", + "title": "URL", + "description": "The URL path to access the file which should be replicated." + }, + "provider": { + "type": "object", + "title": "Storage Provider", + "description": "The storage Provider or Location of the file(s) which should be replicated.", + "default": "Public Web", + "oneOf": [ + { + "title": "HTTPS: Public Web", + "required": ["storage"], + "properties": { + "storage": { + "type": "string", + "const": "HTTPS" + }, + "user_agent": { + "type": "boolean", + "title": "User-Agent", + "default": false, + "description": "Add User-Agent to request" + } + } + }, + { + "title": "GCS: Google Cloud Storage", + "required": ["storage"], + "properties": { + "storage": { + "type": "string", + "title": "Storage", + "const": "GCS" + }, + "service_account_json": { + "type": "string", + "title": "Service Account JSON", + "description": "In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary." + } + } + }, + { + "title": "S3: Amazon Web Services", + "required": ["storage"], + "properties": { + "storage": { + "type": "string", + "title": "Storage", + "const": "S3" + }, + "aws_access_key_id": { + "type": "string", + "title": "AWS Access Key ID", + "description": "In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary." 
+ }, + "aws_secret_access_key": { + "type": "string", + "title": "AWS Secret Access Key", + "description": "In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary.", + "airbyte_secret": true + } + } + }, + { + "title": "AzBlob: Azure Blob Storage", + "required": ["storage", "storage_account"], + "properties": { + "storage": { + "type": "string", + "title": "Storage", + "const": "AzBlob" + }, + "storage_account": { + "type": "string", + "title": "Storage Account", + "description": "The globally unique name of the storage account that the desired blob sits within. See here for more details." + }, + "sas_token": { + "type": "string", + "title": "SAS Token", + "description": "To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. If accessing publicly available data, this field is not necessary.", + "airbyte_secret": true + }, + "shared_key": { + "type": "string", + "title": "Shared Key", + "description": "To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary.", + "airbyte_secret": true + } + } + }, + { + "title": "SSH: Secure Shell", + "required": ["storage", "user", "host"], + "properties": { + "storage": { + "type": "string", + "title": "Storage", + "const": "SSH" + }, + "user": { + "type": "string", + "title": "User", + "description": "" + }, + "password": { + "type": "string", + "title": "Password", + "description": "", + "airbyte_secret": true + }, + "host": { + "type": "string", + "title": "Host", + "description": "" + }, + "port": { + "type": "string", + "title": "Port", + "default": "22", + "description": "" + } + } + }, + { + "title": "SCP: Secure copy protocol", + "required": ["storage", "user", "host"], + "properties": { + "storage": { + "type": "string", + "title": "Storage", + "const": "SCP" + }, + "user": { + "type": "string", + "title": "User", + "description": "" + }, + "password": { + "type": "string", + "title": "Password", + "description": "", + "airbyte_secret": true + }, + "host": { + "type": "string", + "title": "Host", + "description": "" + }, + "port": { + "type": "string", + "title": "Port", + "default": "22", + "description": "" + } + } + }, + { + "title": "SFTP: Secure File Transfer Protocol", + "required": ["storage", "user", "host"], + "properties": { + "storage": { + "type": "string", + "title": "Storage", + "const": "SFTP" + }, + "user": { + "type": "string", + "title": "User", + "description": "" + }, + "password": { + "type": "string", + "title": "Password", + "description": "", + "airbyte_secret": true + }, + "host": { + "type": "string", + "title": "Host", + "description": "" + }, + "port": { + "type": "string", + "title": "Port", + "default": "22", + "description": "" + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-file.json b/jvm/src/main/resources/airbyte/source-file.json new file mode 100644 index 0000000..9af744d --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-file.json @@ -0,0 +1,237 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/file", + + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "File Source Spec", + "type": "object", + 
"additionalProperties": true, + "required": ["dataset_name", "format", "url", "provider"], + "properties": { + "dataset_name": { + "type": "string", + "title": "Dataset Name", + "description": "The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only)." + }, + "format": { + "type": "string", + "enum": ["csv", "json", "jsonl", "excel", "excel_binary", "feather", "parquet", "yaml"], + "default": "csv", + "title": "File Format", + "description": "The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs)." + }, + "reader_options": { + "type": "string", + "title": "Reader Options", + "description": "This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior.", + "examples": ["{}", "{\"sep\": \" \"}"] + }, + "url": { + "type": "string", + "title": "URL", + "description": "The URL path to access the file which should be replicated." + }, + "provider": { + "type": "object", + "title": "Storage Provider", + "description": "The storage Provider or Location of the file(s) which should be replicated.", + "default": "Public Web", + "oneOf": [ + { + "title": "HTTPS: Public Web", + "required": ["storage"], + "properties": { + "storage": { + "type": "string", + "const": "HTTPS" + }, + "user_agent": { + "type": "boolean", + "title": "User-Agent", + "default": false, + "description": "Add User-Agent to request" + } + } + }, + { + "title": "GCS: Google Cloud Storage", + "required": ["storage"], + "properties": { + "storage": { + "type": "string", + "title": "Storage", + "const": "GCS" + }, + "service_account_json": { + "type": "string", + "title": "Service Account JSON", + "description": "In order to access private Buckets stored on Google Cloud, this connector would need a service account json credentials with the proper permissions as described here. Please generate the credentials.json file and copy/paste its content to this field (expecting JSON formats). If accessing publicly available data, this field is not necessary." + } + } + }, + { + "title": "S3: Amazon Web Services", + "required": ["storage"], + "properties": { + "storage": { + "type": "string", + "title": "Storage", + "const": "S3" + }, + "aws_access_key_id": { + "type": "string", + "title": "AWS Access Key ID", + "description": "In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary." + }, + "aws_secret_access_key": { + "type": "string", + "title": "AWS Secret Access Key", + "description": "In order to access private Buckets stored on AWS S3, this connector would need credentials with the proper permissions. If accessing publicly available data, this field is not necessary.", + "airbyte_secret": true + } + } + }, + { + "title": "AzBlob: Azure Blob Storage", + "required": ["storage", "storage_account"], + "properties": { + "storage": { + "type": "string", + "title": "Storage", + "const": "AzBlob" + }, + "storage_account": { + "type": "string", + "title": "Storage Account", + "description": "The globally unique name of the storage account that the desired blob sits within. See here for more details." + }, + "sas_token": { + "type": "string", + "title": "SAS Token", + "description": "To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a SAS (Shared Access Signature) token. 
If accessing publicly available data, this field is not necessary.", + "airbyte_secret": true + }, + "shared_key": { + "type": "string", + "title": "Shared Key", + "description": "To access Azure Blob Storage, this connector would need credentials with the proper permissions. One option is a storage account shared key (aka account key or access key). If accessing publicly available data, this field is not necessary.", + "airbyte_secret": true + } + } + }, + { + "title": "SSH: Secure Shell", + "required": ["storage", "user", "host"], + "properties": { + "storage": { + "type": "string", + "title": "Storage", + "const": "SSH" + }, + "user": { + "type": "string", + "title": "User", + "description": "" + }, + "password": { + "type": "string", + "title": "Password", + "description": "", + "airbyte_secret": true + }, + "host": { + "type": "string", + "title": "Host", + "description": "" + }, + "port": { + "type": "string", + "title": "Port", + "default": "22", + "description": "" + } + } + }, + { + "title": "SCP: Secure copy protocol", + "required": ["storage", "user", "host"], + "properties": { + "storage": { + "type": "string", + "title": "Storage", + "const": "SCP" + }, + "user": { + "type": "string", + "title": "User", + "description": "" + }, + "password": { + "type": "string", + "title": "Password", + "description": "", + "airbyte_secret": true + }, + "host": { + "type": "string", + "title": "Host", + "description": "" + }, + "port": { + "type": "string", + "title": "Port", + "default": "22", + "description": "" + } + } + }, + { + "title": "SFTP: Secure File Transfer Protocol", + "required": ["storage", "user", "host"], + "properties": { + "storage": { + "type": "string", + "title": "Storage", + "const": "SFTP" + }, + "user": { + "type": "string", + "title": "User", + "description": "" + }, + "password": { + "type": "string", + "title": "Password", + "description": "", + "airbyte_secret": true + }, + "host": { + "type": "string", + "title": "Host", + "description": "" + }, + "port": { + "type": "string", + "title": "Port", + "default": "22", + "description": "" + } + } + }, + { + "title": "Local Filesystem (limited)", + "required": ["storage"], + "properties": { + "storage": { + "type": "string", + "title": "Storage", + "description": "WARNING: Note that the local storage URL available for reading must start with the local mount \"/local/\" at the moment until we implement more advanced docker mounting options.", + "const": "local" + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-firebolt.json b/jvm/src/main/resources/airbyte/source-firebolt.json new file mode 100644 index 0000000..b3423e1 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-firebolt.json @@ -0,0 +1,44 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/firebolt", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Firebolt Spec", + "type": "object", + "required": ["username", "password", "database"], + "additionalProperties": false, + "properties": { + "username": { + "type": "string", + "title": "Username", + "description": "Firebolt email address you use to login.", + "examples": ["username@email.com"] + }, + "password": { + "type": "string", + "title": "Password", + "description": "Firebolt password." + }, + "account": { + "type": "string", + "title": "Account", + "description": "Firebolt account to login." 
+ }, + "host": { + "type": "string", + "title": "Host", + "description": "The host name of your Firebolt database.", + "examples": ["api.app.firebolt.io"] + }, + "database": { + "type": "string", + "title": "Database", + "description": "The database to connect to." + }, + "engine": { + "type": "string", + "title": "Engine", + "description": "Engine name or url to connect to." + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-flexport.json b/jvm/src/main/resources/airbyte/source-flexport.json new file mode 100644 index 0000000..8589bb8 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-flexport.json @@ -0,0 +1,24 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/flexport", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Flexport Spec", + "additionalProperties": true, + "type": "object", + "required": ["api_key", "start_date"], + "properties": { + "api_key": { + "order": 0, + "type": "string", + "title": "API Key", + "airbyte_secret": true + }, + "start_date": { + "order": 1, + "title": "Start Date", + "type": "string", + "format": "date-time" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-freshcaller.json b/jvm/src/main/resources/airbyte/source-freshcaller.json new file mode 100644 index 0000000..e320217 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-freshcaller.json @@ -0,0 +1,41 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/freshcaller", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Freshcaller Spec", + "type": "object", + "required": ["domain", "api_key", "start_date"], + "additionalProperties": true, + "properties": { + "domain": { + "type": "string", + "title": "Domain for Freshcaller account", + "description": "Used to construct Base URL for the Freshcaller APIs", + "examples": ["snaptravel"] + }, + "api_key": { + "type": "string", + "title": "API Key", + "description": "Freshcaller API Key. See the docs for more information on how to obtain this key.", + "airbyte_secret": true + }, + "requests_per_minute": { + "title": "Requests per minute", + "type": "integer", + "description": "The number of requests per minute that this source allowed to use. There is a rate limit of 50 requests per minute per app per account." + }, + "start_date": { + "title": "Start Date", + "description": "UTC date and time. 
Any data created after this date will be replicated.", + "format": "date-time", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2022-01-01T12:00:00Z"] + }, + "sync_lag_minutes": { + "title": "Lag in minutes for each sync", + "type": "integer", + "description": "Lag in minutes for each sync, i.e., at time T, data for the time range [prev_sync_time, T-30] will be fetched" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-freshdesk.json b/jvm/src/main/resources/airbyte/source-freshdesk.json new file mode 100644 index 0000000..7faa355 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-freshdesk.json @@ -0,0 +1,38 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/freshdesk", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Freshdesk Spec", + "type": "object", + "required": ["domain", "api_key"], + "additionalProperties": true, + "properties": { + "domain": { + "type": "string", + "description": "Freshdesk domain", + "title": "Domain", + "examples": ["myaccount.freshdesk.com"], + "pattern": "^[a-zA-Z0-9._-]*\\.freshdesk\\.com$" + }, + "api_key": { + "type": "string", + "title": "API Key", + "description": "Freshdesk API Key. See the docs for more information on how to obtain this key.", + "airbyte_secret": true + }, + "requests_per_minute": { + "title": "Requests per minute", + "type": "integer", + "description": "The number of requests per minute that this source allowed to use. There is a rate limit of 50 requests per minute per app per account." + }, + "start_date": { + "title": "Start Date", + "type": "string", + "description": "UTC date and time. Any data created after this date will be replicated. If this parameter is not set, all data will be replicated.", + "format": "date-time", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2020-12-01T00:00:00Z"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-freshsales.json b/jvm/src/main/resources/airbyte/source-freshsales.json new file mode 100644 index 0000000..fee78c2 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-freshsales.json @@ -0,0 +1,24 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/freshsales", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Freshsales Spec", + "type": "object", + "required": ["domain_name", "api_key"], + "additionalProperties": false, + "properties": { + "domain_name": { + "type": "string", + "title": "Domain Name", + "description": "The Name of your Freshsales domain", + "examples": ["mydomain.myfreshworks.com"] + }, + "api_key": { + "type": "string", + "title": "API Key", + "description": "Freshsales API Key. See here. 
The key is case sensitive.", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-freshservice.json b/jvm/src/main/resources/airbyte/source-freshservice.json new file mode 100644 index 0000000..05f9981 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-freshservice.json @@ -0,0 +1,31 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/freshservice", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Freshservice Spec", + "type": "object", + "required": ["domain_name", "api_key", "start_date"], + "additionalProperties": false, + "properties": { + "domain_name": { + "type": "string", + "title": "Domain Name", + "description": "The name of your Freshservice domain", + "examples": ["mydomain.freshservice.com"] + }, + "api_key": { + "title": "API Key", + "type": "string", + "description": "Freshservice API Key. See here. The key is case sensitive.", + "airbyte_secret": true + }, + "start_date": { + "title": "Start Date", + "type": "string", + "description": "UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated.", + "examples": ["2020-10-01T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-github.json b/jvm/src/main/resources/airbyte/source-github.json new file mode 100644 index 0000000..3823d86 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-github.json @@ -0,0 +1,134 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/github", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "GitHub Source Spec", + "type": "object", + "required": ["start_date", "repository"], + "additionalProperties": true, + "properties": { + "credentials": { + "title": "Authentication *", + "description": "Choose how to authenticate to GitHub", + "type": "object", + "order": 0, + "oneOf": [ + { + "type": "object", + "title": "OAuth", + "required": ["access_token"], + "properties": { + "option_title": { + "type": "string", + "const": "OAuth Credentials", + "order": 0 + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "OAuth access token", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "Personal Access Token", + "required": ["personal_access_token"], + "properties": { + "option_title": { + "type": "string", + "const": "PAT Credentials", + "order": 0 + }, + "personal_access_token": { + "type": "string", + "title": "Personal Access Tokens", + "description": "Log into GitHub and then generate a personal access token. To load balance your API quota consumption across multiple API tokens, input multiple tokens separated with \",\"", + "airbyte_secret": true + } + } + } + ] + }, + "start_date": { + "type": "string", + "title": "Start date", + "description": "The date from which you'd like to replicate data from GitHub in the format YYYY-MM-DDT00:00:00Z. For the streams which support this configuration, only data generated on or after the start date will be replicated. 
This field doesn't apply to all streams, see the docs for more info", + "examples": ["2021-03-01T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "order": 1 + }, + "repository": { + "type": "string", + "examples": [ + "airbytehq/airbyte airbytehq/another-repo", + "airbytehq/*", + "airbytehq/airbyte" + ], + "title": "GitHub Repositories", + "description": "Space-delimited list of GitHub organizations/repositories, e.g. `airbytehq/airbyte` for single repository, `airbytehq/*` for get all repositories from organization and `airbytehq/airbyte airbytehq/another-repo` for multiple repositories.", + "order": 2 + }, + "branch": { + "type": "string", + "title": "Branch (Optional)", + "examples": ["airbytehq/airbyte/master airbytehq/airbyte/my-branch"], + "description": "Space-delimited list of GitHub repository branches to pull commits for, e.g. `airbytehq/airbyte/master`. If no branches are specified for a repository, the default branch will be pulled.", + "order": 3 + }, + "page_size_for_large_streams": { + "type": "integer", + "title": "Page size for large streams (Optional)", + "minimum": 1, + "maximum": 100, + "default": 10, + "description": "The Github connector contains several streams with a large amount of data. The page size of such streams depends on the size of your repository. We recommended that you specify values between 10 and 30.", + "order": 4 + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "option_title"], + "predicate_value": "OAuth Credentials", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "access_token": { + "type": "string", + "path_in_connector_config": ["credentials", "access_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-gitlab.json b/jvm/src/main/resources/airbyte/source-gitlab.json new file mode 100644 index 0000000..8ea35a9 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-gitlab.json @@ -0,0 +1,43 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/gitlab", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source GitLab Singer Spec", + "type": "object", + "required": ["api_url", "private_token", "start_date"], + "additionalProperties": false, + "properties": { + "api_url": { + "type": "string", + "examples": ["gitlab.com"], + "title": "API URL", + "description": "Please enter your basic URL from GitLab instance." + }, + "private_token": { + "type": "string", + "title": "Private Token", + "description": "Log into your GitLab account and then generate a personal Access Token.", + "airbyte_secret": true + }, + "groups": { + "type": "string", + "examples": ["airbyte.io"], + "title": "Groups", + "description": "Space-delimited list of groups. e.g. airbyte.io." 
+ }, + "projects": { + "type": "string", + "title": "Projects", + "examples": ["airbyte.io/documentation"], + "description": "Space-delimited list of projects. e.g. airbyte.io/documentation meltano/tap-gitlab." + }, + "start_date": { + "type": "string", + "title": "Start Date", + "description": "The date from which you'd like to replicate data for GitLab API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.", + "examples": ["2021-03-01T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-google-ads.json b/jvm/src/main/resources/airbyte/source-google-ads.json new file mode 100644 index 0000000..b929331 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-google-ads.json @@ -0,0 +1,138 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/google-ads", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Google Ads Spec", + "type": "object", + "required": ["credentials", "start_date", "customer_id"], + "additionalProperties": true, + "properties": { + "credentials": { + "type": "object", + "description": "", + "title": "Google Credentials", + "order": 0, + "required": [ + "developer_token", + "client_id", + "client_secret", + "refresh_token" + ], + "properties": { + "developer_token": { + "type": "string", + "title": "Developer Token", + "order": 0, + "description": "Developer token granted by Google to use their APIs. More instruction on how to find this value in our docs", + "airbyte_secret": true + }, + "client_id": { + "type": "string", + "title": "Client ID", + "order": 1, + "description": "The Client ID of your Google Ads developer application. More instruction on how to find this value in our docs" + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "order": 2, + "description": "The Client Secret of your Google Ads developer application. More instruction on how to find this value in our docs", + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "title": "Refresh Token", + "order": 3, + "description": "The token for obtaining a new access token. More instruction on how to find this value in our docs", + "airbyte_secret": true + }, + "access_token": { + "type": "string", + "title": "Access Token (Optional)", + "order": 4, + "description": "Access Token for making authenticated requests. More instruction on how to find this value in our docs", + "airbyte_secret": true + } + } + }, + "customer_id": { + "title": "Customer ID(s)", + "type": "string", + "description": "Comma separated list of (client) customer IDs. Each customer ID must be specified as a 10-digit number without dashes. More instruction on how to find this value in our docs. Metrics streams like AdGroupAdReport cannot be requested for a manager account.", + "pattern": "^[0-9]{10}(,[0-9]{10})*$", + "examples": ["6783948572,5839201945"], + "order": 1 + }, + "start_date": { + "type": "string", + "title": "Start Date", + "description": "UTC date and time in the format 2017-01-25. Any data before this date will not be replicated.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$", + "examples": ["2017-01-25"], + "order": 2 + }, + "end_date": { + "type": "string", + "title": "End Date (Optional)", + "description": "UTC date and time in the format 2017-01-25. 
Any data after this date will not be replicated.", +        "pattern": "^$|^[0-9]{4}-[0-9]{2}-[0-9]{2}$", +        "examples": ["2017-01-30"], +        "order": 6 +      }, +      "custom_queries": { +        "type": "array", +        "title": "Custom GAQL Queries (Optional)", +        "description": "", +        "order": 3, +        "items": { +          "type": "object", +          "properties": { +            "query": { +              "type": "string", +              "title": "Custom Query", +              "description": "A custom defined GAQL query for building the report. Should not contain segments.date expression because it is used by incremental streams. See Google's query builder for more information.", +              "examples": [ +                "SELECT segments.ad_destination_type, campaign.advertising_channel_sub_type FROM campaign WHERE campaign.status = 'PAUSED'" +              ] +            }, +            "table_name": { +              "type": "string", +              "title": "Destination Table Name", +              "description": "The table name in your destination database for the chosen query." +            } +          } +        } +      }, +      "login_customer_id": { +        "type": "string", +        "title": "Login Customer ID for Managed Accounts (Optional)", +        "description": "If your access to the customer account is through a manager account, this field is required and must be set to the customer ID of the manager account (10-digit number without dashes). You can find more information about this field here", +        "pattern": "^([0-9]{10})?$", +        "examples": ["7349206847"], +        "order": 4 +      }, +      "conversion_window_days": { +        "title": "Conversion Window (Optional)", +        "type": "integer", +        "description": "A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. For more information, see Google's documentation.", +        "minimum": 0, +        "maximum": 1095, +        "default": 14, +        "examples": [14], +        "order": 5 +      } +    } +  }, +  "authSpecification": { +    "auth_type": "oauth2.0", +    "oauth2Specification": { +      "rootObject": ["credentials"], +      "oauthFlowInitParameters": [ +        ["client_id"], +        ["client_secret"], +        ["developer_token"] +      ], +      "oauthFlowOutputParameters": [["access_token"], ["refresh_token"]] +    } +  } +} diff --git a/jvm/src/main/resources/airbyte/source-google-analytics-data-api.json b/jvm/src/main/resources/airbyte/source-google-analytics-data-api.json new file mode 100644 index 0000000..cc683ce --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-google-analytics-data-api.json @@ -0,0 +1,115 @@ +{ +  "documentationUrl": "https://docs.airbyte.com/integrations/sources/google-analytics-v4", +  "connectionSpecification": { +    "$schema": "http://json-schema.org/draft-07/schema#", +    "title": "Google Analytics 4 (GA4) Spec", +    "type": "object", +    "required": ["property_id", "date_ranges_start_date"], +    "additionalProperties": true, +    "properties": { +      "property_id": { +        "type": "string", +        "title": "Property ID", +        "description": "A Google Analytics GA4 property identifier whose events are tracked. 
Specified in the URL path and not the body", + "order": 1 + }, + "credentials": { + "order": 0, + "type": "object", + "title": "Credentials", + "description": "Credentials for the service", + "oneOf": [ + { + "title": "Authenticate via Google (Oauth)", + "type": "object", + "required": ["client_id", "client_secret", "refresh_token"], + "properties": { + "auth_type": { + "type": "string", + "const": "Client", + "order": 0 + }, + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Client ID of your Google Analytics developer application.", + "airbyte_secret": true, + "order": 1 + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The Client Secret of your Google Analytics developer application.", + "airbyte_secret": true, + "order": 2 + }, + "refresh_token": { + "title": "Refresh Token", + "type": "string", + "description": "The token for obtaining a new access token.", + "airbyte_secret": true, + "order": 3 + }, + "access_token": { + "title": "Access Token (Optional)", + "type": "string", + "description": "Access Token for making authenticated requests.", + "airbyte_secret": true, + "order": 4 + } + } + }, + { + "type": "object", + "title": "Service Account Key Authentication", + "required": ["credentials_json"], + "properties": { + "auth_type": { + "type": "string", + "const": "Service", + "order": 0 + }, + "credentials_json": { + "title": "Service Account JSON Key", + "type": "string", + "description": "The JSON key of the service account to use for authorization", + "examples": [ + "{ \"type\": \"service_account\", \"project_id\": YOUR_PROJECT_ID, \"private_key_id\": YOUR_PRIVATE_KEY, ... }" + ], + "airbyte_secret": true + } + } + } + ] + }, + "date_ranges_start_date": { + "type": "string", + "title": "Date Range Start Date", + "description": "The start date. One of the values Ndaysago, yesterday, today or in the format YYYY-MM-DD", + "order": 2 + }, + "custom_reports": { + "order": 3, + "type": "string", + "title": "Custom Reports (Optional)", + "description": "A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field." + }, + "window_in_days": { + "type": "integer", + "title": "Data request time increment in days (Optional)", + "description": "The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364. 
", + "examples": [30, 60, 90, 120, 200, 364], + "default": 1, + "order": 4 + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": ["credentials", 0], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["access_token"], ["refresh_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-google-analytics-v4.json b/jvm/src/main/resources/airbyte/source-google-analytics-v4.json new file mode 100644 index 0000000..a83d8b3 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-google-analytics-v4.json @@ -0,0 +1,116 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/google-analytics-universal-analytics", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Google Analytics (Universal Analytics) Spec", + "type": "object", + "required": ["view_id", "start_date"], + "additionalProperties": true, + "properties": { + "credentials": { + "order": 0, + "type": "object", + "title": "Credentials", + "description": "Credentials for the service", + "oneOf": [ + { + "title": "Authenticate via Google (Oauth)", + "type": "object", + "required": ["client_id", "client_secret", "refresh_token"], + "properties": { + "auth_type": { + "type": "string", + "const": "Client", + "order": 0 + }, + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Client ID of your Google Analytics developer application.", + "airbyte_secret": true, + "order": 1 + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The Client Secret of your Google Analytics developer application.", + "airbyte_secret": true, + "order": 2 + }, + "refresh_token": { + "title": "Refresh Token", + "type": "string", + "description": "The token for obtaining a new access token.", + "airbyte_secret": true, + "order": 3 + }, + "access_token": { + "title": "Access Token (Optional)", + "type": "string", + "description": "Access Token for making authenticated requests.", + "airbyte_secret": true, + "order": 4 + } + } + }, + { + "type": "object", + "title": "Service Account Key Authentication", + "required": ["credentials_json"], + "properties": { + "auth_type": { + "type": "string", + "const": "Service", + "order": 0 + }, + "credentials_json": { + "title": "Service Account JSON Key", + "type": "string", + "description": "The JSON key of the service account to use for authorization", + "examples": [ + "{ \"type\": \"service_account\", \"project_id\": YOUR_PROJECT_ID, \"private_key_id\": YOUR_PRIVATE_KEY, ... }" + ], + "airbyte_secret": true + } + } + } + ] + }, + "start_date": { + "order": 1, + "type": "string", + "title": "Replication Start Date", + "description": "The date in the format YYYY-MM-DD. Any data before this date will not be replicated.", + "examples": ["2020-06-01"] + }, + "view_id": { + "order": 2, + "type": "string", + "title": "View ID", + "description": "The ID for the Google Analytics View you want to fetch data from. This can be found from the Google Analytics Account Explorer." + }, + "custom_reports": { + "order": 3, + "type": "string", + "title": "Custom Reports (Optional)", + "description": "A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field." 
+      }, +      "window_in_days": { +        "type": "integer", +        "title": "Data request time increment in days (Optional)", +        "description": "The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364. ", +        "examples": [30, 60, 90, 120, 200, 364], +        "default": 1, +        "order": 4 +      } +    } +  }, +  "authSpecification": { +    "auth_type": "oauth2.0", +    "oauth2Specification": { +      "rootObject": ["credentials", 0], +      "oauthFlowInitParameters": [["client_id"], ["client_secret"]], +      "oauthFlowOutputParameters": [["access_token"], ["refresh_token"]] +    } +  } +} diff --git a/jvm/src/main/resources/airbyte/source-google-directory.json b/jvm/src/main/resources/airbyte/source-google-directory.json new file mode 100644 index 0000000..2eda4d3 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-google-directory.json @@ -0,0 +1,90 @@ +{ +  "documentationUrl": "https://docs.airbyte.io/integrations/sources/google-directory", +  "connectionSpecification": { +    "$schema": "http://json-schema.org/draft-07/schema#", +    "title": "Google Directory Spec", +    "type": "object", +    "required": [], +    "additionalProperties": true, +    "properties": { +      "credentials": { +        "title": "Google Credentials", +        "description": "Google APIs use the OAuth 2.0 protocol for authentication and authorization. The Source supports Web server application and Service accounts scenarios.", +        "type": "object", +        "oneOf": [ +          { +            "title": "Sign in via Google (OAuth)", +            "description": "For this scenario, the user only needs to give permission to read Google Directory data.", +            "type": "object", +            "required": ["client_id", "client_secret", "refresh_token"], +            "properties": { +              "credentials_title": { +                "type": "string", +                "title": "Credentials Title", +                "description": "Authentication Scenario", +                "const": "Web server app", +                "enum": ["Web server app"], +                "default": "Web server app", +                "order": 0 +              }, +              "client_id": { +                "title": "Client ID", +                "type": "string", +                "description": "The Client ID of the developer application.", +                "airbyte_secret": true +              }, +              "client_secret": { +                "title": "Client secret", +                "type": "string", +                "description": "The Client Secret of the developer application.", +                "airbyte_secret": true +              }, +              "refresh_token": { +                "title": "Refresh Token", +                "type": "string", +                "description": "The Token for obtaining a new access token.", +                "airbyte_secret": true +              } +            } +          }, +          { +            "title": "Service Account Key", +            "description": "For this scenario, the user should obtain the service account's credentials from the Google API Console and provide a delegated email.", +            "type": "object", +            "required": ["credentials_json", "email"], +            "properties": { +              "credentials_title": { +                "type": "string", +                "title": "Credentials Title", +                "description": "Authentication Scenario", +                "const": "Service accounts", +                "enum": ["Service accounts"], +                "default": "Service accounts", +                "order": 0 +              }, +              "credentials_json": { +                "type": "string", +                "title": "Credentials JSON", +                "description": "The contents of the JSON service account key. 
See the docs for more information on how to generate this key.", + "airbyte_secret": true + }, + "email": { + "type": "string", + "title": "Email", + "description": "The email of the user, which has permissions to access the Google Workspace Admin APIs." + } + } + } + ] + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": ["credentials", 0], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["refresh_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-google-search-console.json b/jvm/src/main/resources/airbyte/source-google-search-console.json new file mode 100755 index 0000000..ec8474d --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-google-search-console.json @@ -0,0 +1,119 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/google-search-console", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Google Search Console Spec", + "type": "object", + "required": ["site_urls", "start_date", "authorization"], + "properties": { + "site_urls": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Website URL Property", + "description": "The URLs of the website property attached to your GSC account. Read more here.", + "examples": ["https://example1.com", "https://example2.com"], + "order": 0 + }, + "start_date": { + "type": "string", + "title": "Start Date", + "description": "UTC date in the format 2017-01-25. Any data before this date will not be replicated.", + "examples": ["2021-01-01"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$", + "order": 1 + }, + "end_date": { + "type": "string", + "title": "End Date", + "description": "UTC date in the format 2017-01-25. Any data after this date will not be replicated. Must be greater or equal to the start date field.", + "examples": ["2021-12-12"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$", + "order": 2 + }, + "authorization": { + "type": "object", + "title": "Authentication Type", + "description": "", + "order": 3, + "oneOf": [ + { + "title": "OAuth", + "type": "object", + "required": [ + "auth_type", + "client_id", + "client_secret", + "refresh_token" + ], + "properties": { + "auth_type": { + "type": "string", + "const": "Client", + "order": 0 + }, + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The client ID of your Google Search Console developer application. Read more here.", + "airbyte_secret": true + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The client secret of your Google Search Console developer application. Read more here.", + "airbyte_secret": true + }, + "access_token": { + "title": "Access Token", + "type": "string", + "description": "Access token for making authenticated requests. Read more here.", + "airbyte_secret": true + }, + "refresh_token": { + "title": "Refresh Token", + "type": "string", + "description": "The token for obtaining a new access token. Read more here.", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "Service Account Key Authentication", + "required": ["auth_type", "service_account_info", "email"], + "properties": { + "auth_type": { + "type": "string", + "const": "Service", + "order": 0 + }, + "service_account_info": { + "title": "Service Account JSON Key", + "type": "string", + "description": "The JSON key of the service account to use for authorization. 
Read more here.", + "examples": [ + "{ \"type\": \"service_account\", \"project_id\": YOUR_PROJECT_ID, \"private_key_id\": YOUR_PRIVATE_KEY, ... }" + ] + }, + "email": { + "title": "Admin Email", + "type": "string", + "description": "The email of the user which has permissions to access the Google Workspace Admin APIs." + } + } + } + ] + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": ["authorization", 0], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["access_token"], ["refresh_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-google-workspace-admin-reports.json b/jvm/src/main/resources/airbyte/source-google-workspace-admin-reports.json new file mode 100644 index 0000000..a807909 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-google-workspace-admin-reports.json @@ -0,0 +1,30 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/google-workspace-admin-reports", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Google Directory Spec", + "type": "object", + "required": ["credentials_json", "email"], + "additionalProperties": false, + "properties": { + "credentials_json": { + "type": "string", + "title": "Credentials JSON", + "description": "The contents of the JSON service account key. See the docs for more information on how to generate this key.", + "airbyte_secret": true + }, + "email": { + "type": "string", + "title": "Email", + "description": "The email of the user, who has permissions to access the Google Workspace Admin APIs." + }, + "lookback": { + "type": "integer", + "title": "Lookback Window in Days", + "minimum": 0, + "maximum": 180, + "description": "Sets the range of time shown in the report. The maximum value allowed by the Google API is 180 days." + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-greenhouse.json b/jvm/src/main/resources/airbyte/source-greenhouse.json new file mode 100644 index 0000000..9ecfded --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-greenhouse.json @@ -0,0 +1,19 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/greenhouse", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Greenhouse Spec", + "type": "object", + "required": ["api_key"], + "additionalProperties": true, + "properties": { + "api_key": { + "title": "API Key", + "type": "string", + "description": "Greenhouse API Key. See the docs for more information on how to generate this key.", + "airbyte_secret": true, + "order": 0 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-harvest.json b/jvm/src/main/resources/airbyte/source-harvest.json new file mode 100644 index 0000000..75b9cda --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-harvest.json @@ -0,0 +1,137 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/harvest", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Harvest Spec", + "type": "object", + "required": ["account_id", "replication_start_date"], + "additionalProperties": true, + "properties": { + "account_id": { + "title": "Account ID", + "description": "Harvest account ID. 
Required for all Harvest requests in pair with Personal Access Token", + "airbyte_secret": true, + "type": "string", + "order": 0 + }, + "replication_start_date": { + "title": "Start Date", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2017-01-25T00:00:00Z"], + "type": "string", + "order": 1 + }, + "credentials": { + "title": "Authentication mechanism", + "description": "Choose how to authenticate to Harvest.", + "type": "object", + "order": 2, + "oneOf": [ + { + "type": "object", + "title": "Authenticate via Harvest (OAuth)", + "required": ["client_id", "client_secret", "refresh_token"], + "additionalProperties": true, + "properties": { + "auth_type": { + "type": "string", + "const": "Client", + "order": 0 + }, + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Client ID of your Harvest developer application." + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The Client Secret of your Harvest developer application.", + "airbyte_secret": true + }, + "refresh_token": { + "title": "Refresh Token", + "type": "string", + "description": "Refresh Token to renew the expired Access Token.", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "Authenticate with Personal Access Token", + "required": ["api_token"], + "additionalProperties": true, + "properties": { + "auth_type": { + "type": "string", + "const": "Token", + "order": 0 + }, + "api_token": { + "title": "Personal Access Token", + "description": "Log into Harvest and then create new personal access token.", + "type": "string", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "supportsIncremental": true, + "supported_destination_sync_modes": ["append"], + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": ["credentials", 0], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["refresh_token"]] + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "auth_type"], + "predicate_value": "Client", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": true, + "properties": { + "refresh_token": { + "type": "string", + "path_in_connector_config": ["credentials", "refresh_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": true, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": true, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-hellobaton.json b/jvm/src/main/resources/airbyte/source-hellobaton.json new file mode 100644 index 0000000..50793f6 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-hellobaton.json @@ -0,0 +1,22 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Hellobaton Spec", + "type": "object", + "required": 
["api_key", "company"], + "additionalProperties": false, + "properties": { + "api_key": { + "type": "string", + "description": "authentication key required to access the api endpoints", + "airbyte_secret": true + }, + "company": { + "type": "string", + "description": "Company name that generates your base api url", + "examples": ["google", "facebook", "microsoft"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-hubplanner.json b/jvm/src/main/resources/airbyte/source-hubplanner.json new file mode 100644 index 0000000..aa06182 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-hubplanner.json @@ -0,0 +1,17 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/hubplanner", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Hubplanner Spec", + "type": "object", + "required": ["api_key"], + "additionalProperties": true, + "properties": { + "api_key": { + "type": "string", + "description": "Hubplanner API key. See https://github.com/hubplanner/API#authentication for more details.", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-instagram.json b/jvm/src/main/resources/airbyte/source-instagram.json new file mode 100644 index 0000000..f5bf710 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-instagram.json @@ -0,0 +1,35 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/instagram", + "changelogUrl": "https://docs.airbyte.io/integrations/sources/instagram", + "connectionSpecification": { + "title": "Source Instagram", + "type": "object", + "properties": { + "start_date": { + "title": "Start Date", + "description": "The date from which you'd like to replicate data for User Insights, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2017-01-25T00:00:00Z"], + "type": "string", + "format": "date-time" + }, + "access_token": { + "title": "Access Token", + "description": "The value of the access token generated. See the docs for more information", + "airbyte_secret": true, + "type": "string" + } + }, + "required": ["start_date", "access_token"] + }, + "supportsIncremental": true, + "supported_destination_sync_modes": ["append"], + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": [], + "oauthFlowInitParameters": [], + "oauthFlowOutputParameters": [["access_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-intercom.json b/jvm/src/main/resources/airbyte/source-intercom.json new file mode 100644 index 0000000..f431e15 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-intercom.json @@ -0,0 +1,33 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/intercom", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source Intercom Spec", + "type": "object", + "required": ["start_date", "access_token"], + "additionalProperties": true, + "properties": { + "start_date": { + "type": "string", + "title": "Start date", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.", + "examples": ["2020-11-16T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + }, + "access_token": { + "title": "Access token", + "type": "string", + "description": "Access token for making authenticated requests. 
See the Intercom docs for more information.", + "airbyte_secret": true + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": [], + "oauthFlowInitParameters": [], + "oauthFlowOutputParameters": [["access_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-iterable.json b/jvm/src/main/resources/airbyte/source-iterable.json new file mode 100644 index 0000000..7f4a960 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-iterable.json @@ -0,0 +1,27 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/iterable", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Iterable Spec", + "type": "object", + "required": ["start_date", "api_key"], + "additionalProperties": true, + "properties": { + "api_key": { + "type": "string", + "title": "API Key", + "description": "Iterable API Key. See the docs for more information on how to obtain this key.", + "airbyte_secret": true, + "order": 0 + }, + "start_date": { + "type": "string", + "title": "Start Date", + "description": "The date from which you'd like to replicate data for Iterable, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.", + "examples": ["2021-04-01T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "order": 1 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-jdbc.json b/jvm/src/main/resources/airbyte/source-jdbc.json new file mode 100644 index 0000000..38b1e8f --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-jdbc.json @@ -0,0 +1,27 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/postgres", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "JDBC Source Spec", + "type": "object", + "required": ["username", "jdbc_url"], + "properties": { + "username": { + "title": "Username", + "description": "The username which is used to access the database.", + "type": "string" + }, + "password": { + "title": "Password", + "description": "The password associated with this username.", + "type": "string", + "airbyte_secret": true + }, + "jdbc_url": { + "title": "JDBC URL", + "description": "JDBC formatted URL. See the standard here.", + "type": "string" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-jira.json b/jvm/src/main/resources/airbyte/source-jira.json new file mode 100644 index 0000000..0c581c5 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-jira.json @@ -0,0 +1,73 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/jira", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Jira Spec", + "type": "object", + "required": ["api_token", "domain", "email"], + "additionalProperties": true, + "properties": { + "api_token": { + "type": "string", + "title": "API Token", + "description": "Jira API Token. See the docs for more information on how to generate this key.", + "airbyte_secret": true + }, + "domain": { + "type": "string", + "title": "Domain", + "examples": ["domainname.atlassian.net"], + "pattern": "^[a-zA-Z0-9._-]*\\.atlassian\\.net$", + "description": "The Domain for your Jira account, e.g. airbyteio.atlassian.net" + }, + "email": { + "type": "string", + "title": "Email", + "description": "The user email for your Jira account." 
+ }, + "projects": { + "type": "array", + "title": "Projects", + "items": { + "type": "string" + }, + "examples": ["PROJ1", "PROJ2"], + "description": "List of Jira project keys to replicate data for." + }, + "start_date": { + "type": "string", + "title": "Start Date", + "description": "The date from which you'd like to replicate data for Jira in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only in the following incremental streams: issues.", + "examples": ["2021-03-01T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + }, + "additional_fields": { + "type": "array", + "title": "Additional Fields", + "items": { + "type": "string" + }, + "description": "List of additional fields to include in replicating issues.", + "examples": ["customfield_10096", "customfield_10071"] + }, + "expand_issue_changelog": { + "type": "boolean", + "title": "Expand Issue Changelog", + "description": "Expand the changelog when replicating issues.", + "default": false + }, + "render_fields": { + "type": "boolean", + "title": "Render Issue Fields", + "description": "Render issue fields in HTML format in addition to Jira JSON-like format.", + "default": false + }, + "enable_experimental_streams": { + "type": "boolean", + "title": "Enable Experimental Streams", + "description": "Allow the use of experimental streams which rely on undocumented Jira API endpoints. See https://docs.airbyte.io/integrations/sources/jira#experimental-tables for more info.", + "default": false + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-kafka.json b/jvm/src/main/resources/airbyte/source-kafka.json new file mode 100644 index 0000000..1a64203 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-kafka.json @@ -0,0 +1,231 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/kafka", + "supportsIncremental": true, + "supportsNormalization": false, + "supportsDBT": false, + "supported_source_sync_modes": ["append"], + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Kafka Source Spec", + "type": "object", + "required": ["bootstrap_servers", "subscription", "protocol"], + "additionalProperties": false, + "properties": { + "bootstrap_servers": { + "title": "Bootstrap Servers", + "description": "A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).", + "type": "string", + "examples": ["kafka-broker1:9092,kafka-broker2:9092"] + }, + "subscription": { + "title": "Subscription Method", + "type": "object", + "description": "You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions.", + "oneOf": [ + { + "title": "Manually assign a list of partitions", + "required": ["subscription_type", "topic_partitions"], + "properties": { + "subscription_type": { + "description": "Manually assign a list of partitions to this consumer. 
This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).\nIf the given list of topic partitions is empty, it is treated the same as unsubscribe().", + "type": "string", + "const": "assign", + "enum": ["assign"], + "default": "assign" + }, + "topic_partitions": { + "title": "List of topic:partition Pairs", + "type": "string", + "examples": ["sample.topic:0, sample.topic:1"] + } + } + }, + { + "title": "Subscribe to all topics matching specified pattern", + "required": ["subscription_type", "topic_pattern"], + "properties": { + "subscription_type": { + "description": "The Topic pattern from which the records will be read.", + "type": "string", + "const": "subscribe", + "enum": ["subscribe"], + "default": "subscribe" + }, + "topic_pattern": { + "title": "Topic Pattern", + "type": "string", + "examples": ["sample.topic"] + } + } + } + ] + }, + "test_topic": { + "title": "Test Topic", + "description": "The Topic to test in case the Airbyte can consume messages.", + "type": "string", + "examples": ["test.topic"] + }, + "group_id": { + "title": "Group ID", + "description": "The Group ID is how you distinguish different consumer groups.", + "type": "string", + "examples": ["group.id"] + }, + "max_poll_records": { + "title": "Max Poll Records", + "description": "The maximum number of records returned in a single call to poll(). Note, that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and returns them incrementally from each poll.", + "type": "integer", + "default": 500 + }, + "polling_time": { + "title": "Polling Time", + "description": "Amount of time Kafka connector should try to poll for messages.", + "type": "integer", + "default": 100 + }, + "protocol": { + "title": "Protocol", + "type": "object", + "description": "The Protocol used to communicate with brokers.", + "oneOf": [ + { + "title": "PLAINTEXT", + "required": ["security_protocol"], + "properties": { + "security_protocol": { + "type": "string", + "enum": ["PLAINTEXT"], + "default": "PLAINTEXT" + } + } + }, + { + "title": "SASL PLAINTEXT", + "required": [ + "security_protocol", + "sasl_mechanism", + "sasl_jaas_config" + ], + "properties": { + "security_protocol": { + "type": "string", + "enum": ["SASL_PLAINTEXT"], + "default": "SASL_PLAINTEXT" + }, + "sasl_mechanism": { + "title": "SASL Mechanism", + "description": "The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.", + "type": "string", + "default": "PLAIN", + "enum": ["PLAIN"] + }, + "sasl_jaas_config": { + "title": "SASL JAAS Config", + "description": "The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.", + "type": "string", + "default": "", + "airbyte_secret": true + } + } + }, + { + "title": "SASL SSL", + "required": [ + "security_protocol", + "sasl_mechanism", + "sasl_jaas_config" + ], + "properties": { + "security_protocol": { + "type": "string", + "enum": ["SASL_SSL"], + "default": "SASL_SSL" + }, + "sasl_mechanism": { + "title": "SASL Mechanism", + "description": "The SASL mechanism used for client connections. 
This may be any mechanism for which a security provider is available.", + "type": "string", + "default": "GSSAPI", + "enum": [ + "GSSAPI", + "OAUTHBEARER", + "SCRAM-SHA-256", + "SCRAM-SHA-512", + "PLAIN" + ] + }, + "sasl_jaas_config": { + "title": "SASL JAAS Config", + "description": "The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.", + "type": "string", + "default": "", + "airbyte_secret": true + } + } + } + ] + }, + "client_id": { + "title": "Client ID", + "description": "An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.", + "type": "string", + "examples": ["airbyte-consumer"] + }, + "enable_auto_commit": { + "title": "Enable Auto Commit", + "description": "If true, the consumer's offset will be periodically committed in the background.", + "type": "boolean", + "default": true + }, + "auto_commit_interval_ms": { + "title": "Auto Commit Interval, ms", + "description": "The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true.", + "type": "integer", + "default": 5000 + }, + "client_dns_lookup": { + "title": "Client DNS Lookup", + "description": "Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.", + "type": "string", + "default": "use_all_dns_ips", + "enum": [ + "default", + "use_all_dns_ips", + "resolve_canonical_bootstrap_servers_only" + ] + }, + "retry_backoff_ms": { + "title": "Retry Backoff, ms", + "description": "The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.", + "type": "integer", + "default": 100 + }, + "request_timeout_ms": { + "title": "Request Timeout, ms", + "description": "The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.", + "type": "integer", + "default": 30000 + }, + "receive_buffer_bytes": { + "title": "Receive Buffer, bytes", + "description": "The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. 
If the value is -1, the OS default will be used.", + "type": "integer", + "default": 32768 + }, + "auto_offset_reset": { + "title": "Auto Offset Reset", + "description": "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer.", + "type": "string", + "default": "latest", + "enum": ["latest", "earliest", "none"] + }, + "repeated_calls": { + "title": "Repeated Calls", + "description": "The number of repeated calls to poll() if no messages were received.", + "type": "integer", + "default": 3 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-klaviyo.json b/jvm/src/main/resources/airbyte/source-klaviyo.json new file mode 100644 index 0000000..914600b --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-klaviyo.json @@ -0,0 +1,25 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/klaviyo", + "changelogUrl": "https://docs.airbyte.io/integrations/sources/klaviyo", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Klaviyo Spec", + "type": "object", + "properties": { + "api_key": { + "title": "Api Key", + "description": "Klaviyo API Key. See our docs if you need help finding this key.", + "airbyte_secret": true, + "type": "string" + }, + "start_date": { + "title": "Start Date", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2017-01-25T00:00:00Z"], + "type": "string" + } + }, + "required": ["api_key", "start_date"] + } +} diff --git a/jvm/src/main/resources/airbyte/source-kustomer-singer.json b/jvm/src/main/resources/airbyte/source-kustomer-singer.json new file mode 100644 index 0000000..19e62c7 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-kustomer-singer.json @@ -0,0 +1,24 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/kustomer", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source Kustomer Singer Spec", + "type": "object", + "required": ["api_token", "start_date"], + "additionalProperties": true, + "properties": { + "api_token": { + "title": "API Token", + "type": "string", + "description": "Kustomer API Token. 
See the docs on how to obtain this", + "airbyte_secret": true + }, + "start_date": { + "title": "Start Date", + "type": "string", + "description": "The date from which you'd like to replicate the data", + "examples": ["2019-01-01T00:00:00Z"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-kyriba.json b/jvm/src/main/resources/airbyte/source-kyriba.json new file mode 100644 index 0000000..3705013 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-kyriba.json @@ -0,0 +1,44 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Kyriba Spec", + "type": "object", + "required": ["domain", "username", "password", "start_date"], + "additionalProperties": false, + "properties": { + "domain": { + "type": "string", + "description": "Kyriba domain", + "title": "Domain", + "examples": ["demo.kyriba.com"], + "pattern": "^[a-zA-Z0-9._-]*\\.[a-zA-Z0-9._-]*\\.[a-z]*" + }, + "username": { + "type": "string", + "description": "Username to be used in basic auth", + "title": "Username" + }, + "password": { + "type": "string", + "description": "Password to be used in basic auth", + "title": "Password", + "airbyte_secret": true + }, + "start_date": { + "type": "string", + "description": "The date the sync should start from.", + "title": "Start Date", + "examples": ["2021-01-10"], + "pattern": "^\\d{4}\\-(0[1-9]|1[012])\\-(0[1-9]|[12][0-9]|3[01])$" + }, + "end_date": { + "type": "string", + "description": "The date the sync should end. If let empty the sync will run to the current date.", + "title": "End Date", + "examples": ["2022-03-01"], + "pattern": "^(?:(\\d{4}\\-(0[1-9]|1[012])\\-(0[1-9]|[12][0-9]|3[01]))|)$" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-lemlist.json b/jvm/src/main/resources/airbyte/source-lemlist.json new file mode 100644 index 0000000..9aa5133 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-lemlist.json @@ -0,0 +1,18 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/lemlist", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Lemlist Spec", + "type": "object", + "required": ["api_key"], + "additionalProperties": false, + "properties": { + "api_key": { + "type": "string", + "title": "API key", + "description": "Lemlist API key.", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-linkedin-ads.json b/jvm/src/main/resources/airbyte/source-linkedin-ads.json new file mode 100644 index 0000000..688ab0f --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-linkedin-ads.json @@ -0,0 +1,88 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/linkedin-ads", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Linkedin Ads Spec", + "type": "object", + "required": ["start_date"], + "additionalProperties": true, + "properties": { + "credentials": { + "title": "Authentication *", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "OAuth2.0", + "required": ["client_id", "client_secret", "refresh_token"], + "properties": { + "auth_method": { + "type": "string", + "const": "oAuth2.0" + }, + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The client ID of the LinkedIn Ads developer application.", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "Client secret", + "description": "The client secret the 
LinkedIn Ads developer application.", + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "title": "Refresh token", + "description": "The key to refresh the expired access token.", + "airbyte_secret": true + } + } + }, + { + "title": "Access token", + "type": "object", + "required": ["access_token"], + "properties": { + "auth_method": { + "type": "string", + "const": "access_token" + }, + "access_token": { + "type": "string", + "title": "Access token", + "description": "The token value generated using the authentication code. See the docs to obtain yours.", + "airbyte_secret": true + } + } + } + ] + }, + "start_date": { + "type": "string", + "title": "Start date", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$", + "description": "UTC date in the format 2020-09-17. Any data before this date will not be replicated.", + "examples": ["2021-05-17"] + }, + "account_ids": { + "title": "Account IDs (Optional)", + "type": "array", + "description": "Specify the account IDs separated by a space, to pull the data from. Leave empty, if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.", + "items": { + "type": "integer" + }, + "default": [] + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": ["credentials", "0"], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["refresh_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-linkedin-pages.json b/jvm/src/main/resources/airbyte/source-linkedin-pages.json new file mode 100644 index 0000000..a335440 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-linkedin-pages.json @@ -0,0 +1,79 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/linkedin-pages/", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Linkedin Pages Spec", + "type": "object", + "required": ["org_id"], + "additionalProperties": true, + "properties": { + "org_id": { + "title": "Organization ID", + "type": "integer", + "airbyte_secret": true, + "description": "Specify the Organization ID", + "examples": ["123456789"] + }, + "credentials": { + "title": "Authentication *", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "OAuth2.0", + "required": ["client_id", "client_secret", "refresh_token"], + "properties": { + "auth_method": { + "type": "string", + "const": "oAuth2.0" + }, + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The client ID of the LinkedIn developer application.", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "Client secret", + "description": "The client secret of the LinkedIn developer application.", + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "title": "Refresh token", + "description": "The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.", + "airbyte_secret": true + } + } + }, + { + "title": "Access token", + "type": "object", + "required": ["access_token"], + "properties": { + "auth_method": { + "type": "string", + "const": "access_token" + }, + "access_token": { + "type": "string", + "title": "Access token", + "description": "The token value generated using the LinkedIn Developers OAuth Token Tools. 
See the docs to obtain yours.",
+                "airbyte_secret": true
+              }
+            }
+          }
+        ]
+      }
+    }
+  },
+  "authSpecification": {
+    "auth_type": "oauth2.0",
+    "oauth2Specification": {
+      "rootObject": ["credentials", "0"],
+      "oauthFlowInitParameters": [["client_id"], ["client_secret"]],
+      "oauthFlowOutputParameters": [["refresh_token"]]
+    }
+  }
+}
diff --git a/jvm/src/main/resources/airbyte/source-linnworks.json b/jvm/src/main/resources/airbyte/source-linnworks.json
new file mode 100644
index 0000000..7afcdce
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-linnworks.json
@@ -0,0 +1,33 @@
+{
+  "documentationUrl": "https://docs.airbyte.io/integrations/sources/linnworks",
+  "connectionSpecification": {
+    "$schema": "http://json-schema.org/draft-07/schema#",
+    "title": "Linnworks Spec",
+    "type": "object",
+    "required": ["application_id", "application_secret", "token", "start_date"],
+    "additionalProperties": false,
+    "properties": {
+      "application_id": {
+        "title": "Application ID",
+        "description": "Linnworks Application ID",
+        "type": "string"
+      },
+      "application_secret": {
+        "title": "Application Secret",
+        "description": "Linnworks Application Secret",
+        "type": "string",
+        "airbyte_secret": true
+      },
+      "token": {
+        "title": "API Token",
+        "type": "string"
+      },
+      "start_date": {
+        "title": "Start Date",
+        "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.",
+        "type": "string",
+        "format": "date-time"
+      }
+    }
+  }
+}
diff --git a/jvm/src/main/resources/airbyte/source-looker.json b/jvm/src/main/resources/airbyte/source-looker.json
new file mode 100644
index 0000000..ee9f9dc
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-looker.json
@@ -0,0 +1,41 @@
+{
+  "documentationUrl": "https://docs.airbyte.io/integrations/sources/looker",
+  "connectionSpecification": {
+    "$schema": "http://json-schema.org/draft-07/schema#",
+    "title": "Looker Spec",
+    "type": "object",
+    "required": ["domain", "client_id", "client_secret"],
+    "additionalProperties": false,
+    "properties": {
+      "domain": {
+        "type": "string",
+        "title": "Domain",
+        "examples": [
+          "domainname.looker.com",
+          "looker.clientname.com",
+          "123.123.124.123:8000"
+        ],
+        "description": "Domain for your Looker account, e.g. airbyte.cloud.looker.com, looker.[clientname].com, or an IP address"
+      },
+      "client_id": {
+        "title": "Client ID",
+        "type": "string",
+        "description": "The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key."
+      },
+      "client_secret": {
+        "title": "Client Secret",
+        "type": "string",
+        "description": "The Client Secret is the second part of an API3 key."
+ }, + "run_look_ids": { + "title": "Look IDs to Run", + "type": "array", + "items": { + "type": "string", + "pattern": "^[0-9]*$" + }, + "description": "The IDs of any Looks to run (optional)" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-mailchimp.json b/jvm/src/main/resources/airbyte/source-mailchimp.json new file mode 100644 index 0000000..acc3145 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-mailchimp.json @@ -0,0 +1,109 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/mailchimp", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Mailchimp Spec", + "type": "object", + "required": [], + "additionalProperties": true, + "properties": { + "credentials": { + "type": "object", + "title": "Authentication *", + "oneOf": [ + { + "title": "OAuth2.0", + "type": "object", + "required": ["auth_type", "access_token"], + "properties": { + "auth_type": { + "type": "string", + "const": "oauth2.0", + "order": 0 + }, + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Client ID of your OAuth application.", + "airbyte_secret": true + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The Client Secret of your OAuth application.", + "airbyte_secret": true + }, + "access_token": { + "title": "Access Token", + "type": "string", + "description": "An access token generated using the above client ID and secret.", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "API Key", + "required": ["auth_type", "apikey"], + "properties": { + "auth_type": { + "type": "string", + "const": "apikey", + "order": 1 + }, + "apikey": { + "type": "string", + "title": "API Key", + "description": "Mailchimp API Key. 
See the docs for information on how to generate this key.", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "auth_type"], + "predicate_value": "oauth2.0", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "access_token": { + "type": "string", + "path_in_connector_config": ["credentials", "access_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-mailgun.json b/jvm/src/main/resources/airbyte/source-mailgun.json new file mode 100644 index 0000000..25c41ff --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-mailgun.json @@ -0,0 +1,30 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/mailgun", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source Mailgun Spec", + "type": "object", + "required": ["private_key"], + "additionalProperties": true, + "properties": { + "private_key": { + "type": "string", + "airbyte_secret": true, + "description": "Primary account API key to access your Mailgun data.", + "title": "Private API Key" + }, + "domain_region": { + "type": "string", + "description": "Domain region code. 'EU' or 'US' are possible values. The default is 'US'.", + "title": "Domain Region Code" + }, + "start_date": { + "title": "Replication Start Date", + "description": "UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}$", + "examples": ["2020-10-01 00:00:00"], + "type": "string" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-marketo.json b/jvm/src/main/resources/airbyte/source-marketo.json new file mode 100644 index 0000000..9af488b --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-marketo.json @@ -0,0 +1,42 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/marketo", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source Marketo Spec", + "type": "object", + "required": ["domain_url", "client_id", "client_secret", "start_date"], + "additionalProperties": true, + "properties": { + "domain_url": { + "title": "Domain URL", + "type": "string", + "order": 3, + "description": "Your Marketo Base URL. See the docs for info on how to obtain this.", + "examples": ["https://000-AAA-000.mktorest.com"], + "airbyte_secret": true + }, + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Client ID of your Marketo developer application. See the docs for info on how to obtain this.", + "order": 0, + "airbyte_secret": true + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The Client Secret of your Marketo developer application. 
See the docs for info on how to obtain this.", + "order": 1, + "airbyte_secret": true + }, + "start_date": { + "title": "Start Date", + "type": "string", + "order": 2, + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.", + "examples": ["2020-09-25T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-microsoft-teams.json b/jvm/src/main/resources/airbyte/source-microsoft-teams.json new file mode 100644 index 0000000..442abfd --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-microsoft-teams.json @@ -0,0 +1,151 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/microsoft-teams", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Microsoft Teams Spec", + "type": "object", + "required": ["period"], + "additionalProperties": true, + "properties": { + "period": { + "type": "string", + "title": "Period", + "description": "Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.", + "examples": ["D7"] + }, + "credentials": { + "title": "Authentication mechanism", + "description": "Choose how to authenticate to Microsoft", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "Authenticate via Microsoft (OAuth 2.0)", + "required": [ + "tenant_id", + "client_id", + "client_secret", + "refresh_token" + ], + "additionalProperties": false, + "properties": { + "auth_type": { + "type": "string", + "const": "Client", + "enum": ["Client"], + "default": "Client", + "order": 0 + }, + "tenant_id": { + "title": "Directory (tenant) ID", + "type": "string", + "description": "A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID form the URL" + }, + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Client ID of your Microsoft Teams developer application." + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The Client Secret of your Microsoft Teams developer application.", + "airbyte_secret": true + }, + "refresh_token": { + "title": "Refresh Token", + "type": "string", + "description": "A Refresh Token to renew the expired Access Token.", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "Authenticate via Microsoft", + "required": ["tenant_id", "client_id", "client_secret"], + "additionalProperties": false, + "properties": { + "auth_type": { + "type": "string", + "const": "Token", + "enum": ["Token"], + "default": "Token", + "order": 0 + }, + "tenant_id": { + "title": "Directory (tenant) ID", + "type": "string", + "description": "A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID form the URL" + }, + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Client ID of your Microsoft Teams developer application." 
+ }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The Client Secret of your Microsoft Teams developer application.", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "auth_type"], + "predicate_value": "Client", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "refresh_token": { + "type": "string", + "path_in_connector_config": ["credentials", "refresh_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + }, + "oauth_user_input_from_connector_config_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "tenant_id": { + "type": "string", + "path_in_connector_config": ["credentials", "tenant_id"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-mixpanel.json b/jvm/src/main/resources/airbyte/source-mixpanel.json new file mode 100644 index 0000000..5f41e04 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-mixpanel.json @@ -0,0 +1,71 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/mixpanel", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source Mixpanel Spec", + "type": "object", + "required": ["api_secret"], + "properties": { + "api_secret": { + "order": 0, + "title": "Project Secret", + "type": "string", + "description": "Mixpanel project secret. See the docs for more information on how to obtain this.", + "airbyte_secret": true + }, + "attribution_window": { + "order": 1, + "title": "Attribution Window", + "type": "integer", + "description": " A period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. Default attribution window is 5 days.", + "default": 5 + }, + "project_timezone": { + "order": 2, + "title": "Project Timezone", + "type": "string", + "description": "Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.", + "default": "US/Pacific", + "examples": ["US/Pacific", "UTC"] + }, + "select_properties_by_default": { + "order": 3, + "title": "Select Properties By Default", + "type": "boolean", + "description": "Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.", + "default": true + }, + "start_date": { + "order": 4, + "title": "Start Date", + "type": "string", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. 
If this option is not set, the connector will replicate data from up to one year ago by default.", + "examples": ["2021-11-16"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}(T[0-9]{2}:[0-9]{2}:[0-9]{2}Z)?$" + }, + "end_date": { + "order": 5, + "title": "End Date", + "type": "string", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Left empty to always sync to most recent date", + "examples": ["2021-11-16"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}(T[0-9]{2}:[0-9]{2}:[0-9]{2}Z)?$" + }, + "region": { + "order": 6, + "title": "Region", + "description": "The region of mixpanel domain instance either US or EU.", + "type": "string", + "enum": ["US", "EU"], + "default": "US" + }, + "date_window_size": { + "order": 7, + "title": "Date slicing window", + "description": "Defines window size in days, that used to slice through data. You can reduce it, if amount of data in each window is too big for your environment.", + "type": "integer", + "default": 30 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-monday.json b/jvm/src/main/resources/airbyte/source-monday.json new file mode 100644 index 0000000..d558ba3 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-monday.json @@ -0,0 +1,131 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/monday", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Monday Spec", + "type": "object", + "required": [], + "additionalProperties": true, + "properties": { + "credentials": { + "title": "Authorization Method", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "OAuth2.0", + "required": [ + "auth_type", + "client_id", + "client_secret", + "access_token" + ], + "properties": { + "subdomain": { + "type": "string", + "title": "Subdomain/Slug (Optional)", + "description": "Slug/subdomain of the account, or the first part of the URL that comes before .monday.com", + "default": "", + "order": 0 + }, + "auth_type": { + "type": "string", + "const": "oauth2.0", + "order": 1 + }, + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The Client ID of your OAuth application.", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "The Client Secret of your OAuth application.", + "airbyte_secret": true + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "Access Token for making authenticated requests.", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "API Token", + "required": ["auth_type", "api_token"], + "properties": { + "auth_type": { + "type": "string", + "const": "api_token", + "order": 0 + }, + "api_token": { + "type": "string", + "title": "Personal API Token", + "description": "API Token for making authenticated requests.", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "auth_type"], + "predicate_value": "oauth2.0", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "access_token": { + "type": "string", + "path_in_connector_config": ["credentials", "access_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": true, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { 
+ "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + }, + "oauth_user_input_from_connector_config_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "subdomain": { + "type": "string", + "path_in_connector_config": ["credentials", "subdomain"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-mongodb-v2.json b/jvm/src/main/resources/airbyte/source-mongodb-v2.json new file mode 100644 index 0000000..e78ca96 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-mongodb-v2.json @@ -0,0 +1,124 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/mongodb-v2", + "changelogUrl": "https://docs.airbyte.io/integrations/sources/mongodb-v2", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "MongoDb Source Spec", + "type": "object", + "required": ["database"], + "additionalProperties": true, + "properties": { + "instance_type": { + "type": "object", + "title": "MongoDb Instance Type", + "description": "The MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default.", + "order": 0, + "oneOf": [ + { + "title": "Standalone MongoDb Instance", + "required": ["instance", "host", "port"], + "properties": { + "instance": { + "type": "string", + "enum": ["standalone"], + "default": "standalone" + }, + "host": { + "title": "Host", + "type": "string", + "description": "The host name of the Mongo database.", + "order": 0 + }, + "port": { + "title": "Port", + "type": "integer", + "description": "The port of the Mongo database.", + "minimum": 0, + "maximum": 65536, + "default": 27017, + "examples": ["27017"], + "order": 1 + }, + "tls": { + "title": "TLS Connection", + "type": "boolean", + "description": "Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.", + "default": false, + "order": 2 + } + } + }, + { + "title": "Replica Set", + "required": ["instance", "server_addresses"], + "properties": { + "instance": { + "type": "string", + "enum": ["replica"], + "default": "replica" + }, + "server_addresses": { + "title": "Server Addresses", + "type": "string", + "description": "The members of a replica set. 
Please specify `host`:`port` of each member separated by comma.", + "examples": ["host1:27017,host2:27017,host3:27017"], + "order": 0 + }, + "replica_set": { + "title": "Replica Set", + "type": "string", + "description": "A replica set in MongoDB is a group of mongod processes that maintain the same data set.", + "order": 1 + } + } + }, + { + "title": "MongoDB Atlas", + "additionalProperties": false, + "required": ["instance", "cluster_url"], + "properties": { + "instance": { + "type": "string", + "enum": ["atlas"], + "default": "atlas" + }, + "cluster_url": { + "title": "Cluster URL", + "type": "string", + "description": "The URL of a cluster to connect to.", + "order": 0 + } + } + } + ] + }, + "database": { + "title": "Database Name", + "type": "string", + "description": "The database you want to replicate.", + "order": 1 + }, + "user": { + "title": "User", + "type": "string", + "description": "The username which is used to access the database.", + "order": 2 + }, + "password": { + "title": "Password", + "type": "string", + "description": "The password associated with this username.", + "airbyte_secret": true, + "order": 3 + }, + "auth_source": { + "title": "Authentication Source", + "type": "string", + "description": "The authentication source where the user information is stored.", + "default": "admin", + "examples": ["admin"], + "order": 4 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-mongodb.json b/jvm/src/main/resources/airbyte/source-mongodb.json new file mode 100644 index 0000000..87ec218 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-mongodb.json @@ -0,0 +1,70 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/mongodb", + "changelogUrl": "https://docs.airbyte.io/integrations/sources/mongodb", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Mongodb Source Spec", + "type": "object", + "required": ["host", "port", "database", "user", "password", "auth_source"], + "additionalProperties": false, + "properties": { + "host": { + "title": "Host", + "type": "string", + "description": "Host of a Mongo database to be replicated.", + "order": 0 + }, + "port": { + "title": "Port", + "type": "integer", + "description": "Port of a Mongo database to be replicated.", + "minimum": 0, + "maximum": 65536, + "default": 27017, + "examples": ["27017"], + "order": 1 + }, + "database": { + "title": "Database name", + "type": "string", + "description": "Database to be replicated.", + "order": 2 + }, + "user": { + "title": "User", + "type": "string", + "description": "User", + "order": 3 + }, + "password": { + "title": "Password", + "type": "string", + "description": "Password", + "airbyte_secret": true, + "order": 4 + }, + "auth_source": { + "title": "Authentication source", + "type": "string", + "description": "Authentication source where user information is stored. See the Mongo docs for more info.", + "default": "admin", + "examples": ["admin"], + "order": 5 + }, + "replica_set": { + "title": "Replica Set", + "type": "string", + "description": "The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). 
See the Mongo docs for more info.", + "default": "", + "order": 6 + }, + "ssl": { + "title": "TLS connection", + "type": "boolean", + "description": "If this switch is enabled, TLS connections will be used to connect to MongoDB.", + "default": false, + "order": 7 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-mssql.json b/jvm/src/main/resources/airbyte/source-mssql.json new file mode 100644 index 0000000..8b8acf4 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-mssql.json @@ -0,0 +1,170 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/mssql", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "MSSQL Source Spec", + "type": "object", + "required": ["host", "port", "database", "username"], + "properties": { + "host": { + "description": "The hostname of the database.", + "title": "Host", + "type": "string", + "order": 0 + }, + "port": { + "description": "The port of the database.", + "title": "Port", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "examples": ["1433"], + "order": 1 + }, + "database": { + "description": "The name of the database.", + "title": "Database", + "type": "string", + "examples": ["master"], + "order": 2 + }, + "schemas": { + "title": "Schemas", + "description": "The list of schemas to sync from. Defaults to user. Case sensitive.", + "type": "array", + "items": { + "type": "string" + }, + "minItems": 0, + "uniqueItems": true, + "default": ["dbo"], + "order": 3 + }, + "username": { + "description": "The username which is used to access the database.", + "title": "Username", + "type": "string", + "order": 4 + }, + "password": { + "description": "The password associated with the username.", + "title": "Password", + "type": "string", + "airbyte_secret": true, + "order": 5 + }, + "jdbc_url_params": { + "title": "JDBC URL Params", + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).", + "type": "string", + "order": 6 + }, + "ssl_method": { + "title": "SSL Method", + "type": "object", + "description": "The encryption method which is used when communicating with the database.", + "order": 7, + "oneOf": [ + { + "title": "Unencrypted", + "description": "Data transfer will not be encrypted.", + "required": ["ssl_method"], + "properties": { + "ssl_method": { + "type": "string", + "const": "unencrypted", + "enum": ["unencrypted"], + "default": "unencrypted" + } + } + }, + { + "title": "Encrypted (trust server certificate)", + "description": "Use the certificate provided by the server without verification. (For testing purposes only!)", + "required": ["ssl_method"], + "properties": { + "ssl_method": { + "type": "string", + "const": "encrypted_trust_server_certificate", + "enum": ["encrypted_trust_server_certificate"], + "default": "encrypted_trust_server_certificate" + } + } + }, + { + "title": "Encrypted (verify certificate)", + "description": "Verify and use the certificate provided by the server.", + "required": ["ssl_method", "trustStoreName", "trustStorePassword"], + "properties": { + "ssl_method": { + "type": "string", + "const": "encrypted_verify_certificate", + "enum": ["encrypted_verify_certificate"], + "default": "encrypted_verify_certificate" + }, + "hostNameInCertificate": { + "title": "Host Name In Certificate", + "type": "string", + "description": "Specifies the host name of the server. 
The value of this property must match the subject property of the certificate.", + "order": 7 + } + } + } + ] + }, + "replication_method": { + "type": "object", + "title": "Replication Method", + "description": "The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.", + "default": "STANDARD", + "order": 8, + "oneOf": [ + { + "title": "Standard", + "description": "Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.", + "required": ["method"], + "properties": { + "method": { + "type": "string", + "const": "STANDARD", + "enum": ["STANDARD"], + "default": "STANDARD", + "order": 0 + } + } + }, + { + "title": "Logical Replication (CDC)", + "description": "CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.", + "required": ["method"], + "properties": { + "method": { + "type": "string", + "const": "CDC", + "enum": ["CDC"], + "default": "CDC", + "order": 0 + }, + "data_to_sync": { + "title": "Data to Sync", + "type": "string", + "default": "Existing and New", + "enum": ["Existing and New", "New Changes Only"], + "description": "What data should be synced under the CDC. \"Existing and New\" will read existing data as a snapshot, and sync new changes through CDC. \"New Changes Only\" will skip the initial snapshot, and only sync new changes through CDC.", + "order": 1 + }, + "snapshot_isolation": { + "title": "Initial Snapshot Isolation Level", + "type": "string", + "default": "Snapshot", + "enum": ["Snapshot", "Read Committed"], + "description": "Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. 
If you choose the \"Snapshot\" level, you must enable the snapshot isolation mode on the database.", + "order": 2 + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-my-hours.json b/jvm/src/main/resources/airbyte/source-my-hours.json new file mode 100644 index 0000000..d1a739b --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-my-hours.json @@ -0,0 +1,40 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/my-hours", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "My Hours Spec", + "type": "object", + "required": ["email", "password", "start_date"], + "additionalProperties": false, + "properties": { + "email": { + "title": "Email", + "type": "string", + "description": "Your My Hours username", + "example": "john@doe.com" + }, + "password": { + "title": "Password", + "type": "string", + "description": "The password associated to the username", + "airbyte_secret": true + }, + "start_date": { + "title": "Start Date", + "description": "Start date for collecting time logs", + "examples": ["%Y-%m-%d", "2016-01-01"], + "type": "string", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$" + }, + "logs_batch_size": { + "title": "Time logs batch size", + "description": "Pagination size used for retrieving logs in days", + "examples": [30], + "type": "integer", + "minimum": 1, + "maximum": 365, + "default": 30 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-mysql.json b/jvm/src/main/resources/airbyte/source-mysql.json new file mode 100644 index 0000000..c09509f --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-mysql.json @@ -0,0 +1,221 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/mysql", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "MySql Source Spec", + "type": "object", + "required": ["host", "port", "database", "username", "replication_method"], + "properties": { + "host": { + "description": "The host name of the database.", + "title": "Host", + "type": "string", + "order": 0 + }, + "port": { + "description": "The port to connect to.", + "title": "Port", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 3306, + "examples": ["3306"], + "order": 1 + }, + "database": { + "description": "The database name.", + "title": "Database", + "type": "string", + "order": 2 + }, + "username": { + "description": "The username which is used to access the database.", + "title": "Username", + "type": "string", + "order": 3 + }, + "password": { + "description": "The password associated with the username.", + "title": "Password", + "type": "string", + "airbyte_secret": true, + "order": 4 + }, + "jdbc_url_params": { + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).", + "title": "JDBC URL Params", + "type": "string", + "order": 5 + }, + "ssl": { + "title": "SSL Connection", + "description": "Encrypt data using SSL.", + "type": "boolean", + "default": true, + "order": 6 + }, + "ssl_mode": { + "title": "SSL modes", + "description": "SSL connection modes.
  • preferred - Automatically attempt SSL connection. If the MySQL server does not support SSL, continue with a regular connection.
  • required - Always connect with SSL. If the MySQL server doesn’t support SSL, the connection will not be established. Certificate Authority (CA) and Hostname are not verified.
  • verify-ca - Always connect with SSL. Verifies CA, but allows connection even if Hostname does not match.
  • Verify Identity - Always connect with SSL. Verify both CA and Hostname.
  • Read more in the docs.", + "type": "object", + "order": 7, + "oneOf": [ + { + "title": "preferred", + "description": "Preferred SSL mode.", + "required": ["mode"], + "properties": { + "mode": { + "type": "string", + "const": "preferred", + "enum": ["preferred"], + "default": "preferred", + "order": 0 + } + } + }, + { + "title": "required", + "description": "Require SSL mode.", + "required": ["mode"], + "properties": { + "mode": { + "type": "string", + "const": "required", + "enum": ["required"], + "default": "required", + "order": 0 + } + } + }, + { + "title": "Verify CA", + "description": "Verify CA SSL mode.", + "required": ["mode", "ca_certificate"], + "properties": { + "mode": { + "type": "string", + "const": "verify_ca", + "enum": ["verify_ca"], + "default": "verify_ca", + "order": 0 + }, + "ca_certificate": { + "type": "string", + "title": "CA certificate", + "description": "CA certificate", + "airbyte_secret": true, + "multiline": true, + "order": 1 + }, + "client_certificate": { + "type": "string", + "title": "Client certificate", + "description": "Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well)", + "airbyte_secret": true, + "multiline": true, + "order": 2 + }, + "client_key": { + "type": "string", + "title": "Client key", + "description": "Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well)", + "airbyte_secret": true, + "multiline": true, + "order": 3 + }, + "client_key_password": { + "type": "string", + "title": "Client key password (Optional)", + "description": "Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically.", + "airbyte_secret": true, + "order": 4 + } + } + }, + { + "title": "Verify Identity", + "description": "Verify-full SSL mode.", + "required": ["mode", "ca_certificate"], + "properties": { + "mode": { + "type": "string", + "const": "verify_identity", + "enum": ["verify_identity"], + "default": "verify_identity", + "order": 0 + }, + "ca_certificate": { + "type": "string", + "title": "CA certificate", + "description": "CA certificate", + "airbyte_secret": true, + "multiline": true, + "order": 1 + }, + "client_certificate": { + "type": "string", + "title": "Client certificate", + "description": "Client certificate (this is not a required field, but if you want to use it, you will need to add the Client key as well)", + "airbyte_secret": true, + "multiline": true, + "order": 2 + }, + "client_key": { + "type": "string", + "title": "Client key", + "description": "Client key (this is not a required field, but if you want to use it, you will need to add the Client certificate as well)", + "airbyte_secret": true, + "multiline": true, + "order": 3 + }, + "client_key_password": { + "type": "string", + "title": "Client key password (Optional)", + "description": "Password for keystorage. This field is optional. 
If you do not add it - the password will be generated automatically.", + "airbyte_secret": true, + "order": 4 + } + } + } + ] + }, + "replication_method": { + "type": "object", + "title": "Replication Method", + "description": "Replication method to use for extracting data from the database.", + "order": 8, + "oneOf": [ + { + "title": "STANDARD", + "description": "Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.", + "required": ["method"], + "properties": { + "method": { + "type": "string", + "const": "STANDARD", + "enum": ["STANDARD"], + "default": "STANDARD", + "order": 0 + } + } + }, + { + "title": "Logical Replication (CDC)", + "description": "CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself.", + "required": ["method"], + "properties": { + "method": { + "type": "string", + "const": "CDC", + "enum": ["CDC"], + "default": "CDC", + "order": 0 + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-notion.json b/jvm/src/main/resources/airbyte/source-notion.json new file mode 100644 index 0000000..caecbfa --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-notion.json @@ -0,0 +1,85 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/notion", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Notion Source Spec", + "type": "object", + "required": ["start_date"], + "properties": { + "start_date": { + "title": "Start Date", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3}Z$", + "examples": ["2020-11-16T00:00:00.000Z"], + "type": "string" + }, + "credentials": { + "title": "Authenticate using", + "description": "Pick an authentication method.", + "type": "object", + "order": 1, + "oneOf": [ + { + "type": "object", + "title": "OAuth2.0", + "required": [ + "auth_type", + "client_id", + "client_secret", + "access_token" + ], + "properties": { + "auth_type": { + "type": "string", + "const": "OAuth2.0" + }, + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The ClientID of your Notion integration.", + "airbyte_secret": true + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The ClientSecret of your Notion integration.", + "airbyte_secret": true + }, + "access_token": { + "title": "Access Token", + "type": "string", + "description": "Access Token is a token you received by complete the OauthWebFlow of Notion.", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "Access Token", + "required": ["auth_type", "token"], + "properties": { + "auth_type": { + "type": "string", + "const": "token" + }, + "token": { + "title": "Access Token", + "description": "Notion API access token, see the docs for more information on how to obtain this token.", + "type": "string", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": ["credentials", "0"], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["access_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-okta.json b/jvm/src/main/resources/airbyte/source-okta.json new file mode 100644 index 0000000..9fc91d4 --- /dev/null +++ 
b/jvm/src/main/resources/airbyte/source-okta.json @@ -0,0 +1,137 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/okta", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Okta Spec", + "type": "object", + "required": [], + "additionalProperties": true, + "properties": { + "domain": { + "type": "string", + "title": "Okta domain", + "description": "The Okta domain. See the docs for instructions on how to find it.", + "airbyte_secret": false + }, + "start_date": { + "type": "string", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "description": "UTC date and time in the format YYYY-MM-DDTHH:MM:SSZ. Any data before this date will not be replicated.", + "examples": ["2022-07-22T00:00:00Z"], + "title": "Start Date" + }, + "credentials": { + "title": "Authorization Method *", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "OAuth2.0", + "required": [ + "auth_type", + "client_id", + "client_secret", + "refresh_token" + ], + "properties": { + "auth_type": { + "type": "string", + "const": "oauth2.0", + "order": 0 + }, + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The Client ID of your OAuth application.", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "The Client Secret of your OAuth application.", + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "title": "Refresh Token", + "description": "Refresh Token to obtain new Access Token, when it's expired.", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "API Token", + "required": ["auth_type", "api_token"], + "properties": { + "auth_type": { + "type": "string", + "const": "api_token", + "order": 0 + }, + "api_token": { + "type": "string", + "title": "Personal API Token", + "description": "An Okta token. 
See the docs for instructions on how to generate it.", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "auth_type"], + "predicate_value": "oauth2.0", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": true, + "properties": { + "refresh_token": { + "type": "string", + "path_in_connector_config": ["credentials", "refresh_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": true, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": true, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + }, + "oauth_user_input_from_connector_config_specification": { + "type": "object", + "additionalProperties": true, + "properties": { + "domain": { + "type": "string", + "path_in_connector_config": ["domain"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-onesignal.json b/jvm/src/main/resources/airbyte/source-onesignal.json new file mode 100644 index 0000000..1134e5b --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-onesignal.json @@ -0,0 +1,33 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/onesignal", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "OneSignal Source Spec", + "type": "object", + "required": ["user_auth_key", "start_date", "outcome_names"], + "additionalProperties": false, + "properties": { + "user_auth_key": { + "type": "string", + "title": "User Auth Key", + "description": "OneSignal User Auth Key, see the docs for more information on how to obtain this key.", + "airbyte_secret": true + }, + "start_date": { + "type": "string", + "title": "Start Date", + "description": "The date from which you'd like to replicate data for OneSignal API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.", + "examples": ["2020-11-16T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + }, + "outcome_names": { + "type": "string", + "title": "Outcome Names", + "description": "Comma-separated list of names and the value (sum/count) for the returned outcome data. See the docs for more details", + "examples": [ + "os__session_duration.count,os__click.count,CustomOutcomeName.sum" + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-openweather.json b/jvm/src/main/resources/airbyte/source-openweather.json new file mode 100644 index 0000000..6efb78f --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-openweather.json @@ -0,0 +1,96 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Open Weather Spec", + "type": "object", + "required": ["appid", "lat", "lon"], + "additionalProperties": true, + "properties": { + "lat": { + "title": "Latitude", + "type": "string", + "pattern": "^[-]?\\d{1,2}(\\.\\d+)?$", + "examples": ["45.7603", "-21.249107858038816"], + "description": "Latitude for which you want to get weather condition from. 
(min -90, max 90)" + }, + "lon": { + "title": "Longitude", + "type": "string", + "pattern": "^[-]?\\d{1,3}(\\.\\d+)?$", + "examples": ["4.835659", "-70.39482074115321"], + "description": "Longitude for which you want to get weather condition from. (min -180, max 180)" + }, + "appid": { + "title": "App ID", + "type": "string", + "description": "Your OpenWeather API Key. See here. The key is case sensitive.", + "airbyte_secret": true + }, + "units": { + "title": "Units", + "type": "string", + "description": "Units of measurement. standard, metric and imperial units are available. If you do not use the units parameter, standard units will be applied by default.", + "enum": ["standard", "metric", "imperial"], + "examples": ["standard", "metric", "imperial"] + }, + "lang": { + "title": "Language", + "type": "string", + "description": "You can use lang parameter to get the output in your language. The contents of the description field will be translated. See here for the list of supported languages.", + "enum": [ + "af", + "al", + "ar", + "az", + "bg", + "ca", + "cz", + "da", + "de", + "el", + "en", + "eu", + "fa", + "fi", + "fr", + "gl", + "he", + "hi", + "hr", + "hu", + "id", + "it", + "ja", + "kr", + "la", + "lt", + "mk", + "no", + "nl", + "pl", + "pt", + "pt_br", + "ro", + "ru", + "sv", + "se", + "sk", + "sl", + "sp", + "es", + "sr", + "th", + "tr", + "ua", + "uk", + "vi", + "zh_cn", + "zh_tw", + "zu" + ], + "examples": ["en", "fr", "pt_br", "uk", "zh_cn", "zh_tw"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-oracle.json b/jvm/src/main/resources/airbyte/source-oracle.json new file mode 100644 index 0000000..3dce50d --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-oracle.json @@ -0,0 +1,162 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/oracle", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Oracle Source Spec", + "type": "object", + "required": ["host", "port", "username"], + "properties": { + "host": { + "title": "Host", + "description": "Hostname of the database.", + "type": "string", + "order": 1 + }, + "port": { + "title": "Port", + "description": "Port of the database.\nOracle Corporations recommends the following port numbers:\n1521 - Default listening port for client connections to the listener. 
\n2484 - Recommended and officially registered listening port for client connections to the listener using TCP/IP with SSL", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 1521, + "order": 2 + }, + "connection_data": { + "title": "Connect by", + "type": "object", + "description": "Connect data that will be used for DB connection", + "order": 3, + "oneOf": [ + { + "title": "Service name", + "description": "Use service name", + "required": ["service_name"], + "properties": { + "connection_type": { + "type": "string", + "const": "service_name", + "default": "service_name", + "order": 0 + }, + "service_name": { + "title": "Service name", + "type": "string", + "order": 1 + } + } + }, + { + "title": "System ID (SID)", + "description": "Use SID (Oracle System Identifier)", + "required": ["sid"], + "properties": { + "connection_type": { + "type": "string", + "const": "sid", + "default": "sid", + "order": 0 + }, + "sid": { + "title": "System ID (SID)", + "type": "string", + "order": 1 + } + } + } + ] + }, + "username": { + "title": "User", + "description": "The username which is used to access the database.", + "type": "string", + "order": 4 + }, + "password": { + "title": "Password", + "description": "The password associated with the username.", + "type": "string", + "airbyte_secret": true, + "order": 5 + }, + "schemas": { + "title": "Schemas", + "description": "The list of schemas to sync from. Defaults to user. Case sensitive.", + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "uniqueItems": true, + "order": 6 + }, + "jdbc_url_params": { + "title": "JDBC URL Params", + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. 
(example: key1=value1&key2=value2&key3=value3).", + "type": "string", + "order": 7 + }, + "encryption": { + "title": "Encryption", + "type": "object", + "description": "The encryption method with is used when communicating with the database.", + "order": 8, + "oneOf": [ + { + "title": "Unencrypted", + "description": "Data transfer will not be encrypted.", + "required": ["encryption_method"], + "properties": { + "encryption_method": { + "type": "string", + "const": "unencrypted", + "enum": ["unencrypted"], + "default": "unencrypted" + } + } + }, + { + "title": "Native Network Encryption (NNE)", + "description": "The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.", + "required": ["encryption_method"], + "properties": { + "encryption_method": { + "type": "string", + "const": "client_nne", + "enum": ["client_nne"], + "default": "client_nne" + }, + "encryption_algorithm": { + "type": "string", + "description": "This parameter defines what encryption algorithm is used.", + "title": "Encryption Algorithm", + "default": "AES256", + "enum": ["AES256", "RC4_56", "3DES168"] + } + } + }, + { + "title": "TLS Encrypted (verify certificate)", + "description": "Verify and use the certificate provided by the server.", + "required": ["encryption_method", "ssl_certificate"], + "properties": { + "encryption_method": { + "type": "string", + "const": "encrypted_verify_certificate", + "enum": ["encrypted_verify_certificate"], + "default": "encrypted_verify_certificate" + }, + "ssl_certificate": { + "title": "SSL PEM File", + "description": "Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.", + "type": "string", + "airbyte_secret": true, + "multiline": true, + "order": 4 + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-orb.json b/jvm/src/main/resources/airbyte/source-orb.json new file mode 100644 index 0000000..37f9d77 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-orb.json @@ -0,0 +1,53 @@ +{ + "documentationUrl": "https://docs.withorb.com/", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Orb Spec", + "type": "object", + "required": ["api_key"], + "additionalProperties": true, + "properties": { + "api_key": { + "type": "string", + "title": "Orb API Key", + "description": "Orb API Key, issued from the Orb admin console.", + "airbyte_secret": true, + "order": 1 + }, + "start_date": { + "type": "string", + "title": "Start Date", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "description": "UTC date and time in the format 2022-03-01T00:00:00Z. Any data with created_at before this data will not be synced.", + "examples": ["2022-03-01T00:00:00Z"], + "order": 2 + }, + "lookback_window_days": { + "type": "integer", + "title": "Lookback Window (in days)", + "default": 0, + "minimum": 0, + "description": "When set to N, the connector will always refresh resources created within the past N days. 
By default, updated objects that are not newly created are not incrementally synced.", + "order": 3 + }, + "string_event_properties_keys": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Event properties keys (string values)", + "description": "Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction.", + "order": 4 + }, + "numeric_event_properties_keys": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Event properties keys (numeric values)", + "description": "Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction.", + "order": 5 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-outreach.json b/jvm/src/main/resources/airbyte/source-outreach.json new file mode 100644 index 0000000..149684e --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-outreach.json @@ -0,0 +1,47 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/outreach", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source Outreach Spec", + "type": "object", + "required": [ + "client_id", + "client_secret", + "refresh_token", + "redirect_uri", + "start_date" + ], + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The Client ID of your Outreach developer application." + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "The Client Secret of your Outreach developer application.", + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "title": "Refresh Token", + "description": "The token for obtaining the new access token.", + "airbyte_secret": true + }, + "redirect_uri": { + "type": "string", + "title": "Redirect URI", + "description": "A Redirect URI is the location where the authorization server sends the user once the app has been successfully authorized and granted an authorization code or access token." + }, + "start_date": { + "type": "string", + "title": "Start Date", + "description": "The date from which you'd like to replicate data for Outreach API, in the format YYYY-MM-DDT00:00:00Z. 
All data generated after this date will be replicated.", + "examples": ["2020-11-16T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-pardot.json b/jvm/src/main/resources/airbyte/source-pardot.json new file mode 100644 index 0000000..da48d19 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-pardot.json @@ -0,0 +1,48 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Pardot Spec", + "type": "object", + "required": [ + "pardot_business_unit_id", + "client_id", + "client_secret", + "refresh_token" + ], + "additionalProperties": false, + "properties": { + "pardot_business_unit_id": { + "description": "Pardot Business ID, can be found at Setup > Pardot > Pardot Account Setup", + "type": "string" + }, + "client_id": { + "description": "The Consumer Key that can be found when viewing your app in Salesforce", + "type": "string", + "airbyte_secret": true + }, + "client_secret": { + "description": "The Consumer Secret that can be found when viewing your app in Salesforce", + "type": "string", + "airbyte_secret": true + }, + "refresh_token": { + "description": "Salesforce Refresh Token used for Airbyte to access your Salesforce account. If you don't know what this is, follow this guide to retrieve it.", + "type": "string", + "airbyte_secret": true + }, + "start_date": { + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. Leave blank to skip this filter", + "type": "string", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "default": null, + "examples": ["2021-07-25T00:00:00Z"] + }, + "is_sandbox": { + "description": "Whether or not the the app is in a Salesforce sandbox. If you do not know what this, assume it is false.", + "type": "boolean", + "default": false + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-paypal-transaction.json b/jvm/src/main/resources/airbyte/source-paypal-transaction.json new file mode 100644 index 0000000..22f44ac --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-paypal-transaction.json @@ -0,0 +1,43 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/paypal-transactions", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Paypal Transaction Search", + "type": "object", + "required": ["start_date", "is_sandbox"], + "additionalProperties": true, + "properties": { + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The Client ID of your Paypal developer application.", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "Client secret", + "description": "The Client Secret of your Paypal developer application.", + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "title": "Refresh token (Optional)", + "description": "The key to refresh the expired access token.", + "airbyte_secret": true + }, + "start_date": { + "type": "string", + "title": "Start Date", + "description": "Start Date for data extraction in ISO format. 
Date must be in range from 3 years till 12 hrs before present time.", + "examples": ["2021-06-11T23:59:59-00:00"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}[+-][0-9]{2}:[0-9]{2}$" + }, + "is_sandbox": { + "title": "Sandbox", + "description": "Determines whether to use the sandbox or production environment.", + "type": "boolean", + "default": false + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-paystack.json b/jvm/src/main/resources/airbyte/source-paystack.json new file mode 100644 index 0000000..f77473a --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-paystack.json @@ -0,0 +1,33 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/paystack", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Paystack Source Spec", + "type": "object", + "required": ["secret_key", "start_date"], + "additionalProperties": false, + "properties": { + "secret_key": { + "type": "string", + "title": "Secret Key", + "pattern": "^(s|r)k_(live|test)_[a-zA-Z0-9]+$", + "description": "The Paystack API key (usually starts with 'sk_live_'; find yours here).", + "airbyte_secret": true + }, + "start_date": { + "type": "string", + "title": "Start Date", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.", + "examples": ["2017-01-25T00:00:00Z"] + }, + "lookback_window_days": { + "type": "integer", + "title": "Lookback Window (in days)", + "default": 0, + "minimum": 0, + "description": "When set, the connector will always reload data from the past N days, where N is the value set here. This is useful if your data is updated after creation." + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-persistiq.json b/jvm/src/main/resources/airbyte/source-persistiq.json new file mode 100644 index 0000000..bf32602 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-persistiq.json @@ -0,0 +1,17 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/persistiq", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Persistiq Spec", + "type": "object", + "required": ["api_key"], + "additionalProperties": false, + "properties": { + "api_key": { + "type": "string", + "description": "PersistIq API Key. See the docs for more information on where to find that key.", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-pinterest.json b/jvm/src/main/resources/airbyte/source-pinterest.json new file mode 100644 index 0000000..031300d --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-pinterest.json @@ -0,0 +1,115 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/pinterest", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Pinterest Spec", + "type": "object", + "required": ["start_date"], + "additionalProperties": true, + "properties": { + "start_date": { + "type": "string", + "title": "Start Date", + "description": "A date in the format YYYY-MM-DD. 
If you have not set a date, it would be defaulted to 2020-07-28.", + "examples": ["2020-07-28"] + }, + "credentials": { + "title": "Authorization Method", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "OAuth2.0", + "required": ["auth_method", "refresh_token"], + "properties": { + "auth_method": { + "type": "string", + "const": "oauth2.0", + "order": 0 + }, + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The Client ID of your OAuth application", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "The Client Secret of your OAuth application.", + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "title": "Refresh Token", + "description": "Refresh Token to obtain new Access Token, when it's expired.", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "Access Token", + "required": ["auth_method", "access_token"], + "properties": { + "auth_method": { + "type": "string", + "const": "access_token", + "order": 0 + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "The Access Token to make authenticated requests.", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "auth_method"], + "predicate_value": "oauth2.0", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "refresh_token": { + "type": "string", + "path_in_connector_config": ["credentials", "refresh_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-pipedrive.json b/jvm/src/main/resources/airbyte/source-pipedrive.json new file mode 100644 index 0000000..d03f5c6 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-pipedrive.json @@ -0,0 +1,88 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/pipedrive", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Pipedrive Spec", + "type": "object", + "required": ["replication_start_date"], + "additionalProperties": true, + "properties": { + "authorization": { + "type": "object", + "title": "Authentication Type", + "description": "Choose one of the possible authorization method", + "oneOf": [ + { + "title": "Sign in via Pipedrive (OAuth)", + "type": "object", + "required": [ + "auth_type", + "client_id", + "client_secret", + "refresh_token" + ], + "properties": { + "auth_type": { + "type": "string", + "const": "Client", + "order": 0 + }, + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Client ID of your Pipedrive developer application.", + "airbyte_secret": true + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The Client Secret of your Pipedrive developer application", + "airbyte_secret": true + }, 
+ "refresh_token": { + "title": "Refresh Token", + "type": "string", + "description": "The token for obtaining the new access token.", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "API Key Authentication", + "required": ["auth_type", "api_token"], + "properties": { + "auth_type": { + "type": "string", + "const": "Token", + "order": 0 + }, + "api_token": { + "title": "API Token", + "type": "string", + "description": "The Pipedrive API Token.", + "airbyte_secret": true + } + } + } + ] + }, + "replication_start_date": { + "title": "Start Date", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. When specified and not None, then stream will behave as incremental", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2017-01-25T00:00:00Z"], + "type": "string" + } + } + }, + "supportsIncremental": true, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": ["authorization", 0], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["refresh_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-pivotal-tracker.json b/jvm/src/main/resources/airbyte/source-pivotal-tracker.json new file mode 100644 index 0000000..40eaea4 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-pivotal-tracker.json @@ -0,0 +1,17 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Pivotal Tracker Spec", + "type": "object", + "required": ["api_token"], + "additionalProperties": false, + "properties": { + "api_token": { + "type": "string", + "description": "Pivotal Tracker API token", + "examples": ["5c054d0de3440452190fdc5d5a04d871"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-plaid.json b/jvm/src/main/resources/airbyte/source-plaid.json new file mode 100644 index 0000000..0960530 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-plaid.json @@ -0,0 +1,40 @@ +{ + "documentationUrl": "https://plaid.com/docs/api/", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "required": ["access_token", "api_key", "client_id", "plaid_env"], + "additionalProperties": true, + "properties": { + "access_token": { + "type": "string", + "title": "Access Token", + "description": "The end-user's Link access token." + }, + "api_key": { + "title": "API Key", + "type": "string", + "description": "The Plaid API key to use to hit the API.", + "airbyte_secret": true + }, + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Plaid client id" + }, + "plaid_env": { + "title": "Plaid Environment", + "type": "string", + "enum": ["sandbox", "development", "production"], + "description": "The Plaid environment" + }, + "start_date": { + "title": "Start Date", + "type": "string", + "description": "The date from which you'd like to replicate data for Plaid in the format YYYY-MM-DD. 
All data generated after this date will be replicated.", + "examples": ["2021-03-01"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-pokeapi.json b/jvm/src/main/resources/airbyte/source-pokeapi.json new file mode 100644 index 0000000..6f0a9b4 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-pokeapi.json @@ -0,0 +1,19 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/pokeapi", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Pokeapi Spec", + "type": "object", + "required": ["pokemon_name"], + "additionalProperties": false, + "properties": { + "pokemon_name": { + "type": "string", + "title": "Pokemon Name", + "description": "Pokemon requested from the API.", + "pattern": "^[a-z0-9_\\-]+$", + "examples": ["ditto", "luxray", "snorlax"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-postgres.json b/jvm/src/main/resources/airbyte/source-postgres.json new file mode 100644 index 0000000..f6fc019 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-postgres.json @@ -0,0 +1,296 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/postgres", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Postgres Source Spec", + "type": "object", + "required": ["host", "port", "database", "username"], + "properties": { + "host": { + "title": "Host", + "description": "Hostname of the database.", + "type": "string", + "order": 0 + }, + "port": { + "title": "Port", + "description": "Port of the database.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 5432, + "examples": ["5432"], + "order": 1 + }, + "database": { + "title": "Database Name", + "description": "Name of the database.", + "type": "string", + "order": 2 + }, + "schemas": { + "title": "Schemas", + "description": "The list of schemas (case sensitive) to sync from. Defaults to public.", + "type": "array", + "items": { + "type": "string" + }, + "minItems": 0, + "uniqueItems": true, + "default": ["public"], + "order": 3 + }, + "username": { + "title": "Username", + "description": "Username to access the database.", + "type": "string", + "order": 4 + }, + "password": { + "title": "Password", + "description": "Password associated with the username.", + "type": "string", + "airbyte_secret": true, + "order": 5 + }, + "jdbc_url_params": { + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.", + "title": "JDBC URL Parameters (Advanced)", + "type": "string", + "order": 6 + }, + "ssl": { + "title": "Connect using SSL", + "description": "Encrypt data using SSL. When activating SSL, please select one of the connection modes.", + "type": "boolean", + "default": false, + "order": 7 + }, + "ssl_mode": { + "title": "SSL Modes", + "description": "SSL connection modes. \n
    • disable - Disables encryption of communication between Airbyte and source database
    • allow - Enables encryption only when required by the source database
    • prefer - allows unencrypted connection only if the source database does not support encryption
    • require - Always require encryption. If the source database server does not support encryption, connection will fail
    • verify-ca - Always require encryption and verifies that the source database server has a valid SSL certificate
    • verify-full - This is the most secure mode. Always require encryption and verifies the identity of the source database server
    \n Read more in the docs.", + "type": "object", + "order": 7, + "oneOf": [ + { + "title": "disable", + "additionalProperties": false, + "description": "Disable SSL.", + "required": ["mode"], + "properties": { + "mode": { + "type": "string", + "const": "disable", + "enum": ["disable"], + "default": "disable", + "order": 0 + } + } + }, + { + "title": "allow", + "additionalProperties": false, + "description": "Allow SSL mode.", + "required": ["mode"], + "properties": { + "mode": { + "type": "string", + "const": "allow", + "enum": ["allow"], + "default": "allow", + "order": 0 + } + } + }, + { + "title": "prefer", + "additionalProperties": false, + "description": "Prefer SSL mode.", + "required": ["mode"], + "properties": { + "mode": { + "type": "string", + "const": "prefer", + "enum": ["prefer"], + "default": "prefer", + "order": 0 + } + } + }, + { + "title": "require", + "additionalProperties": false, + "description": "Require SSL mode.", + "required": ["mode"], + "properties": { + "mode": { + "type": "string", + "const": "require", + "enum": ["require"], + "default": "require", + "order": 0 + } + } + }, + { + "title": "verify-ca", + "additionalProperties": false, + "description": "Verify-ca SSL mode.", + "required": ["mode", "ca_certificate"], + "properties": { + "mode": { + "type": "string", + "const": "verify-ca", + "enum": ["verify-ca"], + "default": "verify-ca", + "order": 0 + }, + "ca_certificate": { + "type": "string", + "title": "CA certificate", + "description": "CA certificate", + "airbyte_secret": true, + "multiline": true, + "order": 1 + }, + "client_certificate": { + "type": "string", + "title": "Client Certificate (Optional)", + "description": "Client certificate", + "airbyte_secret": true, + "multiline": true, + "order": 2 + }, + "client_key": { + "type": "string", + "title": "Client Key (Optional)", + "description": "Client key", + "airbyte_secret": true, + "multiline": true, + "order": 3 + }, + "client_key_password": { + "type": "string", + "title": "Client key password (Optional)", + "description": "Password for keystorage. If you do not add it - the password will be generated automatically.", + "airbyte_secret": true, + "order": 4 + } + } + }, + { + "title": "verify-full", + "additionalProperties": false, + "description": "Verify-full SSL mode.", + "required": ["mode", "ca_certificate"], + "properties": { + "mode": { + "type": "string", + "const": "verify-full", + "enum": ["verify-full"], + "default": "verify-full", + "order": 0 + }, + "ca_certificate": { + "type": "string", + "title": "CA Certificate", + "description": "CA certificate", + "airbyte_secret": true, + "multiline": true, + "order": 1 + }, + "client_certificate": { + "type": "string", + "title": "Client Certificate (Optional)", + "description": "Client certificate", + "airbyte_secret": true, + "multiline": true, + "order": 2 + }, + "client_key": { + "type": "string", + "title": "Client Key (Optional)", + "description": "Client key", + "airbyte_secret": true, + "multiline": true, + "order": 3 + }, + "client_key_password": { + "type": "string", + "title": "Client key password (Optional)", + "description": "Password for keystorage. 
If you do not add it - the password will be generated automatically.", + "airbyte_secret": true, + "order": 4 + } + } + } + ] + }, + "replication_method": { + "type": "object", + "title": "Replication Method", + "description": "Replication method for extracting data from the database.", + "order": 8, + "oneOf": [ + { + "title": "Standard", + "description": "Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.", + "required": ["method"], + "properties": { + "method": { + "type": "string", + "const": "Standard", + "enum": ["Standard"], + "default": "Standard", + "order": 0 + } + } + }, + { + "title": "Logical Replication (CDC)", + "description": "Logical replication uses the Postgres write-ahead log (WAL) to detect inserts, updates, and deletes. This needs to be configured on the source database itself. Only available on Postgres 10 and above. Read the docs.", + "required": ["method", "replication_slot", "publication"], + "properties": { + "method": { + "type": "string", + "const": "CDC", + "enum": ["CDC"], + "default": "CDC", + "order": 0 + }, + "plugin": { + "type": "string", + "title": "Plugin", + "description": "A logical decoding plugin installed on the PostgreSQL server. The `pgoutput` plugin is used by default. If the replication table contains a lot of big jsonb values it is recommended to use `wal2json` plugin. Read more about selecting replication plugins.", + "enum": ["pgoutput", "wal2json"], + "default": "pgoutput", + "order": 1 + }, + "replication_slot": { + "type": "string", + "title": "Replication Slot", + "description": "A plugin logical replication slot. Read about replication slots.", + "order": 2 + }, + "publication": { + "type": "string", + "title": "Publication", + "description": "A Postgres publication used for consuming changes. Read about publications and replication identities.", + "order": 3 + }, + "initial_waiting_seconds": { + "type": "integer", + "title": "Initial Waiting Time in Seconds (Advanced)", + "description": "The amount of time the connector will wait when it launches to determine if there is new data to sync or not. Defaults to 300 seconds. Valid range: 120 seconds to 1200 seconds. Read about initial waiting time.", + "default": 300, + "order": 4, + "min": 120, + "max": 1200 + } + } + } + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-posthog.json b/jvm/src/main/resources/airbyte/source-posthog.json new file mode 100644 index 0000000..46208ba --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-posthog.json @@ -0,0 +1,31 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/posthog", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "PostHog Spec", + "type": "object", + "required": ["api_key", "start_date"], + "properties": { + "start_date": { + "title": "Start Date", + "type": "string", + "description": "The date from which you'd like to replicate the data. Any data before this date will not be replicated.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2021-01-01T00:00:00Z"] + }, + "api_key": { + "type": "string", + "airbyte_secret": true, + "title": "API Key", + "description": "API Key. See the docs for information on how to generate this key." + }, + "base_url": { + "type": "string", + "default": "https://app.posthog.com", + "title": "Base URL", + "description": "Base PostHog url. 
Defaults to PostHog Cloud (https://app.posthog.com).", + "examples": ["https://posthog.example.com"] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-prestashop.json b/jvm/src/main/resources/airbyte/source-prestashop.json new file mode 100644 index 0000000..b32d67f --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-prestashop.json @@ -0,0 +1,21 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "PrestaShop Spec", + "type": "object", + "required": ["url", "access_key"], + "additionalProperties": false, + "properties": { + "url": { + "type": "string", + "description": "Shop URL without trailing slash (domain name or IP address)" + }, + "access_key": { + "type": "string", + "description": "Your PrestaShop access key. See the docs for info on how to obtain this.", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-qualaroo.json b/jvm/src/main/resources/airbyte/source-qualaroo.json new file mode 100644 index 0000000..c183880 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-qualaroo.json @@ -0,0 +1,48 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/qualaroo", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Qualaroo Spec", + "type": "object", + "required": ["token", "key", "start_date"], + "additionalProperties": true, + "properties": { + "token": { + "type": "string", + "title": "API token", + "description": "A Qualaroo token. See the docs for instructions on how to generate it.", + "airbyte_secret": true + }, + "key": { + "type": "string", + "title": "API key", + "description": "A Qualaroo token. See the docs for instructions on how to generate it.", + "airbyte_secret": true + }, + "start_date": { + "type": "string", + "title": "Start Date", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3}Z$", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.", + "examples": ["2021-03-01T00:00:00.000Z"] + }, + "survey_ids": { + "type": "array", + "items": { + "type": "string", + "pattern": "^[0-9]{1,8}$" + }, + "title": "Qualaroo survey IDs", + "description": "IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated." + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": [], + "oauthFlowInitParameters": [], + "oauthFlowOutputParameters": [["token"], ["key"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-quickbooks-singer.json b/jvm/src/main/resources/airbyte/source-quickbooks-singer.json new file mode 100644 index 0000000..8c03de4 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-quickbooks-singer.json @@ -0,0 +1,61 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/quickbooks", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source QuickBooks Singer Spec", + "type": "object", + "required": [ + "client_id", + "client_secret", + "refresh_token", + "realm_id", + "user_agent", + "start_date", + "sandbox" + ], + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "title": "Client ID", + "description": "Identifies which app is making the request. 
Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production." + }, + "client_secret": { + "description": " Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production.", + "title": "Client Secret", + "type": "string", + "airbyte_secret": true + }, + "refresh_token": { + "description": "A token used when refreshing the access token.", + "title": "Refresh Token", + "type": "string", + "airbyte_secret": true + }, + "realm_id": { + "description": "Labeled Company ID. The Make API Calls panel is populated with the realm id and the current access token.", + "title": "Realm ID", + "type": "string", + "airbyte_secret": true + }, + "user_agent": { + "type": "string", + "title": "User Agent", + "description": "Process and email for API logging purposes. Example: tap-quickbooks ." + }, + "start_date": { + "description": "The default value to use if no bookmark exists for an endpoint (rfc3339 date string). E.g, 2021-03-20T00:00:00Z. Any data before this date will not be replicated.", + "title": "Start Date", + "type": "string", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2021-03-20T00:00:00Z"] + }, + "sandbox": { + "description": "Determines whether to use the sandbox or production environment.", + "title": "Sandbox", + "type": "boolean", + "default": false + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-recharge.json b/jvm/src/main/resources/airbyte/source-recharge.json new file mode 100644 index 0000000..d280aee --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-recharge.json @@ -0,0 +1,25 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/recharge", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Recharge Spec", + "type": "object", + "required": ["start_date", "access_token"], + "additionalProperties": true, + "properties": { + "start_date": { + "type": "string", + "title": "Start Date", + "description": "The date from which you'd like to replicate data for Recharge API, in the format YYYY-MM-DDT00:00:00Z. Any data before this date will not be replicated.", + "examples": ["2021-05-14T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "The value of the Access Token generated. See the docs for more information.", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-recurly.json b/jvm/src/main/resources/airbyte/source-recurly.json new file mode 100644 index 0000000..02a427f --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-recurly.json @@ -0,0 +1,33 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/recurly", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Recurly Source Spec", + "type": "object", + "required": ["api_key"], + "additionalProperties": false, + "properties": { + "api_key": { + "type": "string", + "title": "API Key", + "airbyte_secret": true, + "description": "Recurly API Key. 
See the docs for more information on how to generate this key.", + "order": 1 + }, + "begin_time": { + "type": "string", + "description": "ISO8601 timestamp from which the replication from Recurly API will start from.", + "examples": ["2021-12-01T00:00:00"], + "pattern": "^$|^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}$", + "order": 2 + }, + "end_time": { + "type": "string", + "description": "ISO8601 timestamp to which the replication from Recurly API will stop. Records after that date won't be imported.", + "examples": ["2021-12-01T00:00:00"], + "pattern": "^$|^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}$", + "order": 3 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-redshift.json b/jvm/src/main/resources/airbyte/source-redshift.json new file mode 100644 index 0000000..8393dea --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-redshift.json @@ -0,0 +1,65 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/destinations/redshift", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Redshift Source Spec", + "type": "object", + "required": ["host", "port", "database", "username", "password"], + "properties": { + "host": { + "title": "Host", + "description": "Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com).", + "type": "string", + "order": 1 + }, + "port": { + "title": "Port", + "description": "Port of the database.", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 5439, + "examples": ["5439"], + "order": 2 + }, + "database": { + "title": "Database", + "description": "Name of the database.", + "type": "string", + "examples": ["master"], + "order": 3 + }, + "schemas": { + "title": "Schemas", + "description": "The list of schemas to sync from. Specify one or more explicitly or keep empty to process all schemas. Schema names are case sensitive.", + "type": "array", + "items": { + "type": "string" + }, + "minItems": 0, + "uniqueItems": true, + "examples": ["public"], + "order": 4 + }, + "username": { + "title": "Username", + "description": "Username to use to access the database.", + "type": "string", + "order": 5 + }, + "password": { + "title": "Password", + "description": "Password associated with the username.", + "type": "string", + "airbyte_secret": true, + "order": 6 + }, + "jdbc_url_params": { + "title": "JDBC URL Params", + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. 
(example: key1=value1&key2=value2&key3=value3).", + "type": "string", + "order": 7 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-retently.json b/jvm/src/main/resources/airbyte/source-retently.json new file mode 100644 index 0000000..a509dc6 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-retently.json @@ -0,0 +1,114 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Retently Api Spec", + "type": "object", + "additionalProperties": true, + "properties": { + "credentials": { + "title": "Authentication Mechanism", + "description": "Choose how to authenticate to Retently", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "Authenticate via Retently (OAuth)", + "required": ["client_id", "client_secret", "refresh_token"], + "additionalProperties": false, + "properties": { + "auth_type": { + "type": "string", + "const": "Client", + "enum": ["Client"], + "default": "Client", + "order": 0 + }, + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Client ID of your Retently developer application." + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The Client Secret of your Retently developer application.", + "airbyte_secret": true + }, + "refresh_token": { + "title": "Refresh Token", + "type": "string", + "description": "Retently Refresh Token which can be used to fetch new Bearer Tokens when the current one expires.", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "Authenticate with API Token", + "required": ["api_key"], + "additionalProperties": false, + "properties": { + "auth_type": { + "type": "string", + "const": "Token", + "enum": ["Token"], + "default": "Token", + "order": 0 + }, + "api_key": { + "title": "API Token", + "description": "Retently API Token. 
See the docs for more information on how to obtain this key.", + "type": "string", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "auth_type"], + "predicate_value": "Client", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "refresh_token": { + "type": "string", + "path_in_connector_config": ["credentials", "refresh_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-rki-covid.json b/jvm/src/main/resources/airbyte/source-rki-covid.json new file mode 100644 index 0000000..1991c44 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-rki-covid.json @@ -0,0 +1,18 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/rki-covid", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "RKI Covid Spec", + "type": "object", + "required": ["start_date"], + "additionalProperties": true, + "properties": { + "start_date": { + "type": "string", + "title": "Start Date", + "description": "UTC date in the format 2017-01-25. Any data before this date will not be replicated.", + "order": 1 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-s3.json b/jvm/src/main/resources/airbyte/source-s3.json new file mode 100644 index 0000000..d3825b9 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-s3.json @@ -0,0 +1,277 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/s3", + "changelogUrl": "https://docs.airbyte.io/integrations/sources/s3", + "connectionSpecification": { + "title": "S3 Source Spec", + "type": "object", + "properties": { + "dataset": { + "title": "Output Stream Name", + "description": "The name of the stream you would like this source to output. Can contain letters, numbers, or underscores.", + "pattern": "^([A-Za-z0-9-_]+)$", + "order": 0, + "type": "string" + }, + "path_pattern": { + "title": "Pattern of files to replicate", + "description": "A regular expression which tells the connector which files to replicate. All files which match this pattern will be replicated. Use | to separate multiple patterns. See this page to understand pattern syntax (GLOBSTAR and SPLIT flags are enabled). 
Use pattern ** to pick up all files.", + "examples": [ + "**", + "myFolder/myTableFiles/*.csv|myFolder/myOtherTableFiles/*.csv" + ], + "order": 10, + "type": "string" + }, + "format": { + "title": "File Format", + "description": "The format of the files you'd like to replicate", + "default": "csv", + "order": 20, + "type": "object", + "oneOf": [ + { + "title": "CSV", + "description": "This connector utilises PyArrow (Apache Arrow) for CSV parsing.", + "type": "object", + "properties": { + "filetype": { + "title": "Filetype", + "const": "csv", + "type": "string" + }, + "delimiter": { + "title": "Delimiter", + "description": "The character delimiting individual cells in the CSV data. This may only be a 1-character string. For tab-delimited data enter '\\t'.", + "default": ",", + "minLength": 1, + "order": 0, + "type": "string" + }, + "infer_datatypes": { + "title": "Infer Datatypes", + "description": "Configures whether a schema for the source should be inferred from the current data or not. If set to false and a custom schema is set, then the manually enforced schema is used. If a schema is not manually set, and this is set to false, then all fields will be read as strings", + "default": true, + "order": 1, + "type": "boolean" + }, + "quote_char": { + "title": "Quote Character", + "description": "The character used for quoting CSV values. To disallow quoting, make this field blank.", + "default": "\"", + "order": 2, + "type": "string" + }, + "escape_char": { + "title": "Escape Character", + "description": "The character used for escaping special characters. To disallow escaping, leave this field blank.", + "order": 3, + "type": "string" + }, + "encoding": { + "title": "Encoding", + "description": "The character encoding of the CSV data. Leave blank to default to UTF8. See list of python encodings for allowable options.", + "default": "utf8", + "order": 4, + "type": "string" + }, + "double_quote": { + "title": "Double Quote", + "description": "Whether two quotes in a quoted CSV value denote a single quote in the data.", + "default": true, + "order": 5, + "type": "boolean" + }, + "newlines_in_values": { + "title": "Allow newlines in values", + "description": "Whether newline characters are allowed in CSV values. Turning this on may affect performance. Leave blank to default to False.", + "default": false, + "order": 6, + "type": "boolean" + }, + "additional_reader_options": { + "title": "Additional Reader Options", + "description": "Optionally add a valid JSON string here to provide additional options to the csv reader. Mappings must correspond to options detailed here. 'column_types' is used internally to handle schema so overriding that would likely cause problems.", + "default": "{}", + "examples": [ + "{\"timestamp_parsers\": [\"%m/%d/%Y %H:%M\", \"%Y/%m/%d %H:%M\"], \"strings_can_be_null\": true, \"null_values\": [\"NA\", \"NULL\"]}" + ], + "order": 7, + "type": "string" + }, + "advanced_options": { + "title": "Advanced Options", + "description": "Optionally add a valid JSON string here to provide additional Pyarrow ReadOptions. Specify 'column_names' here if your CSV doesn't have header, or if you want to use custom column names. 
'block_size' and 'encoding' are already used above, specifying them again here will override the values above.", + "default": "{}", + "examples": ["{\"column_names\": [\"column1\", \"column2\"]}"], + "order": 8, + "type": "string" + }, + "block_size": { + "title": "Block Size", + "description": "The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors.", + "default": 10000, + "order": 9, + "type": "integer" + } + } + }, + { + "title": "Parquet", + "description": "This connector utilises PyArrow (Apache Arrow) for Parquet parsing.", + "type": "object", + "properties": { + "filetype": { + "title": "Filetype", + "const": "parquet", + "type": "string" + }, + "columns": { + "title": "Selected Columns", + "description": "If you only want to sync a subset of the columns from the file(s), add the columns you want here as a comma-delimited list. Leave it empty to sync all columns.", + "order": 0, + "type": "array", + "items": { + "type": "string" + } + }, + "batch_size": { + "title": "Record batch size", + "description": "Maximum number of records per batch read from the input files. Batches may be smaller if there aren’t enough rows in the file. This option can help avoid out-of-memory errors if your data is particularly wide.", + "default": 65536, + "order": 1, + "type": "integer" + }, + "buffer_size": { + "title": "Buffer Size", + "description": "Perform read buffering when deserializing individual column chunks. By default every group column will be loaded fully to memory. This option can help avoid out-of-memory errors if your data is particularly wide.", + "default": 2, + "type": "integer" + } + } + }, + { + "title": "Avro", + "description": "This connector utilises fastavro for Avro parsing.", + "type": "object", + "properties": { + "filetype": { + "title": "Filetype", + "const": "avro", + "type": "string" + } + } + }, + { + "title": "Jsonl", + "description": "This connector uses PyArrow for JSON Lines (jsonl) file parsing.", + "type": "object", + "properties": { + "filetype": { + "title": "Filetype", + "const": "jsonl", + "type": "string" + }, + "newlines_in_values": { + "title": "Allow newlines in values", + "description": "Whether newline characters are allowed in JSON values. Turning this on may affect performance. Leave blank to default to False.", + "default": false, + "order": 0, + "type": "boolean" + }, + "unexpected_field_behavior": { + "title": "Unexpected field behavior", + "description": "How JSON fields outside of explicit_schema (if given) are treated. Check PyArrow documentation for details.", + "default": "infer", + "examples": ["ignore", "infer", "error"], + "order": 1, + "allOf": [ + { + "title": "UnexpectedFieldBehaviorEnum", + "description": "An enumeration.", + "enum": ["ignore", "infer", "error"], + "type": "string" + } + ] + }, + "block_size": { + "title": "Block Size", + "description": "The chunk size in bytes to process at a time in memory from each file. If your data is particularly wide and failing during schema detection, increasing this should solve it. Beware of raising this too high as you could hit OOM errors.", + "default": 10000, + "order": 2, + "type": "integer" + } + } + } + ] + }, + "schema": { + "title": "Manually enforced data schema (Optional)", + "description": "Optionally provide a schema to enforce, as a valid JSON string. 
Ensure this is a mapping of { \"column\" : \"type\" }, where types are valid JSON Schema datatypes. Leave as {} to auto-infer the schema.", + "default": "{}", + "examples": [ + "{\"column_1\": \"number\", \"column_2\": \"string\", \"column_3\": \"array\", \"column_4\": \"object\", \"column_5\": \"boolean\"}" + ], + "order": 30, + "type": "string" + }, + "provider": { + "title": "S3: Amazon Web Services", + "type": "object", + "properties": { + "bucket": { + "title": "Bucket", + "description": "Name of the S3 bucket where the file(s) exist.", + "order": 0, + "type": "string" + }, + "aws_access_key_id": { + "title": "AWS Access Key ID", + "description": "In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary.", + "airbyte_secret": true, + "order": 1, + "type": "string" + }, + "aws_secret_access_key": { + "title": "AWS Secret Access Key", + "description": "In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper permissions. If accessing publicly available data, this field is not necessary.", + "airbyte_secret": true, + "order": 2, + "type": "string" + }, + "path_prefix": { + "title": "Path Prefix", + "description": "By providing a path-like prefix (e.g. myFolder/thisTable/) under which all the relevant files sit, we can optimize finding these in S3. This is optional but recommended if your bucket contains many folders/files which you don't need to replicate.", + "default": "", + "order": 3, + "type": "string" + }, + "endpoint": { + "title": "Endpoint", + "description": "Endpoint to an S3 compatible service. Leave empty to use AWS.", + "default": "", + "order": 4, + "type": "string" + }, + "use_ssl": { + "title": "Use TLS", + "description": "Whether the remote server is using a secure SSL/TLS connection. Only relevant if using an S3-compatible, non-AWS server", + "order": 5, + "type": "boolean" + }, + "verify_ssl_cert": { + "title": "Verify TLS Certificates", + "description": "Set this to false to allow self signed certificates. Only relevant if using an S3-compatible, non-AWS server", + "order": 6, + "type": "boolean" + } + }, + "required": ["bucket"], + "order": 11, + "description": "Use this to load files from S3 or S3-compatible services" + } + }, + "required": ["dataset", "path_pattern", "provider"] + }, + "supportsIncremental": true, + "supported_destination_sync_modes": ["overwrite", "append", "append_dedup"] +} diff --git a/jvm/src/main/resources/airbyte/source-salesloft.json b/jvm/src/main/resources/airbyte/source-salesloft.json new file mode 100644 index 0000000..e7f43c8 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-salesloft.json @@ -0,0 +1,36 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/salesloft", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source Salesloft Spec", + "type": "object", + "required": ["client_id", "client_secret", "refresh_token", "start_date"], + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The Client ID of your Salesloft developer application." 
+ }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "The Client Secret of your Salesloft developer application.", + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "title": "Refresh Token", + "description": "The token for obtaining a new access token.", + "airbyte_secret": true + }, + "start_date": { + "type": "string", + "title": "Start Date", + "description": "The date from which you'd like to replicate data for Salesloft API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.", + "examples": ["2020-11-16T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-search-metrics.json b/jvm/src/main/resources/airbyte/source-search-metrics.json new file mode 100644 index 0000000..c33c10d --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-search-metrics.json @@ -0,0 +1,71 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/seacrh-metrics", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source Search Metrics Spec", + "type": "object", + "required": ["api_key", "client_secret", "country_code", "start_date"], + "additionalProperties": true, + "properties": { + "api_key": { + "title": "API Key", + "type": "string", + "description": "", + "airbyte_secret": true + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "", + "airbyte_secret": true + }, + "country_code": { + "title": "Country Code", + "type": "string", + "default": "", + "description": "The region of the S3 staging bucket to use if utilising a copy strategy.", + "enum": [ + "", + "AR", + "AU", + "AT", + "BE", + "BR", + "CA", + "CN", + "CO", + "DK", + "FI", + "FR", + "DE", + "HK", + "IN", + "IE", + "IT", + "JP", + "MX", + "NL", + "NO", + "PL", + "RU", + "SG", + "ZA", + "ES", + "SE", + "CH", + "TR", + "US", + "GB" + ], + "order": 2 + }, + "start_date": { + "title": "Start Date", + "type": "string", + "description": "Data generated in SearchMetrics after this date will be replicated. This date must be specified in the format YYYY-MM-DDT00:00:00Z.", + "examples": ["20200925"], + "pattern": "^[0-9]{4}[0-9]{2}[0-9]{2}$" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-sendgrid.json b/jvm/src/main/resources/airbyte/source-sendgrid.json new file mode 100644 index 0000000..acaceb6 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-sendgrid.json @@ -0,0 +1,25 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/sendgrid", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Sendgrid Spec", + "type": "object", + "required": ["apikey"], + "additionalProperties": true, + "properties": { + "apikey": { + "title": "Sendgrid API key", + "type": "string", + "description": "API Key, use admin to generate this key.", + "order": 0 + }, + "start_time": { + "title": "Start time", + "type": "integer", + "description": "Start time in timestamp integer format. 
Any data before this timestamp will not be replicated.", + "examples": [1558359837], + "order": 1 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-sentry.json b/jvm/src/main/resources/airbyte/source-sentry.json new file mode 100644 index 0000000..7820a6e --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-sentry.json @@ -0,0 +1,34 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/sentry", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Sentry Spec", + "type": "object", + "required": ["auth_token", "organization", "project"], + "additionalProperties": true, + "properties": { + "auth_token": { + "type": "string", + "title": "Authentication Tokens", + "description": "Log into Sentry and then create authentication tokens. For self-hosted, you can find or create authentication tokens by visiting \"{instance_url_prefix}/settings/account/api/auth-tokens/\"", + "airbyte_secret": true + }, + "hostname": { + "type": "string", + "title": "Host Name", + "description": "Host name of the Sentry API server. For self-hosted, specify your host name here. Otherwise, leave it empty.", + "default": "sentry.io" + }, + "organization": { + "type": "string", + "title": "Organization", + "description": "The slug of the organization the groups belong to." + }, + "project": { + "type": "string", + "title": "Project", + "description": "The name (slug) of the Project you want to sync." + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-sftp.json b/jvm/src/main/resources/airbyte/source-sftp.json new file mode 100644 index 0000000..c697dd6 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-sftp.json @@ -0,0 +1,106 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/source/sftp", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "SFTP Source Spec", + "type": "object", + "required": ["user", "host", "port"], + "additionalProperties": true, + "properties": { + "user": { + "title": "User Name", + "description": "The server user", + "type": "string", + "order": 0 + }, + "host": { + "title": "Host Address", + "description": "The server host address", + "type": "string", + "examples": ["www.host.com", "192.0.2.1"], + "order": 1 + }, + "port": { + "title": "Port", + "description": "The server port", + "type": "integer", + "default": 22, + "examples": ["22"], + "order": 2 + }, + "credentials": { + "type": "object", + "title": "Authentication *", + "description": "The server authentication method", + "order": 3, + "oneOf": [ + { + "title": "Password Authentication", + "required": ["auth_method", "auth_user_password"], + "properties": { + "auth_method": { + "description": "Connect through password authentication", + "type": "string", + "const": "SSH_PASSWORD_AUTH", + "order": 0 + }, + "auth_user_password": { + "title": "Password", + "description": "OS-level password for logging into the jump server host", + "type": "string", + "airbyte_secret": true, + "order": 1 + } + } + }, + { + "title": "SSH Key Authentication", + "required": ["auth_method", "auth_ssh_key"], + "properties": { + "auth_method": { + "description": "Connect through ssh key", + "type": "string", + "const": "SSH_KEY_AUTH", + "order": 0 + }, + "auth_ssh_key": { + "title": "SSH Private Key", + "description": "OS-level user account ssh key credentials in RSA PEM format ( created with ssh-keygen -t rsa -m PEM -f myuser_rsa )", + "type": "string", + "airbyte_secret": true, + "multiline": true, + 
"order": 1 + } + } + } + ] + }, + "file_types": { + "title": "File types", + "description": "Coma separated file types. Currently only 'csv' and 'json' types are supported.", + "type": "string", + "default": "csv,json", + "order": 4, + "examples": ["csv,json", "csv"] + }, + "folder_path": { + "title": "Folder Path (Optional)", + "description": "The directory to search files for sync", + "type": "string", + "default": "", + "examples": ["/logs/2022"], + "order": 5 + }, + "file_pattern": { + "title": "File Pattern (Optional)", + "description": "The regular expression to specify files for sync in a chosen Folder Path", + "type": "string", + "default": "", + "examples": [ + "log-([0-9]{4})([0-9]{2})([0-9]{2}) - This will filter files which `log-yearmmdd`" + ], + "order": 6 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-shopify.json b/jvm/src/main/resources/airbyte/source-shopify.json new file mode 100644 index 0000000..9ef00e3 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-shopify.json @@ -0,0 +1,137 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/shopify", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Shopify Source CDK Specifications", + "type": "object", + "required": ["shop", "start_date"], + "additionalProperties": true, + "properties": { + "shop": { + "type": "string", + "title": "Shopify Store", + "description": "The name of your Shopify store found in the URL. For example, if your URL was https://NAME.myshopify.com, then the name would be 'NAME'.", + "order": 1 + }, + "credentials": { + "title": "Shopify Authorization Method", + "description": "The authorization method to use to retrieve data from Shopify", + "type": "object", + "order": 2, + "oneOf": [ + { + "type": "object", + "title": "OAuth2.0", + "description": "OAuth2.0", + "required": ["auth_method"], + "properties": { + "auth_method": { + "type": "string", + "const": "oauth2.0", + "order": 0 + }, + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The Client ID of the Shopify developer application.", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "The Client Secret of the Shopify developer application.", + "airbyte_secret": true + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "The Access Token for making authenticated requests.", + "airbyte_secret": true + } + } + }, + { + "title": "API Password", + "description": "API Password Auth", + "type": "object", + "required": ["auth_method", "api_password"], + "properties": { + "auth_method": { + "type": "string", + "const": "api_password", + "order": 0 + }, + "api_password": { + "type": "string", + "title": "API Password", + "description": "The API Password for your private application in the `Shopify` store.", + "airbyte_secret": true + } + } + } + ] + }, + "start_date": { + "type": "string", + "title": "Replication Start Date", + "description": "The date you would like to replicate data from. Format: YYYY-MM-DD. 
Any data before this date will not be replicated.", + "examples": ["2021-01-01"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$", + "order": 3 + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "auth_method"], + "predicate_value": "oauth2.0", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "access_token": { + "type": "string", + "path_in_connector_config": ["credentials", "access_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + }, + "oauth_user_input_from_connector_config_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "shop": { + "type": "string", + "path_in_connector_config": ["shop"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-shortio.json b/jvm/src/main/resources/airbyte/source-shortio.json new file mode 100644 index 0000000..27e39c4 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-shortio.json @@ -0,0 +1,29 @@ +{ + "documentationUrl": "https://developers.short.io/reference", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Shortio Spec", + "type": "object", + "required": ["domain_id", "secret_key", "start_date"], + "properties": { + "domain_id": { + "type": "string", + "description": "Short.io Domain ID", + "title": "Domain ID", + "airbyte_secret": false + }, + "secret_key": { + "type": "string", + "title": "Secret Key", + "description": "Short.io Secret Key", + "airbyte_secret": true + }, + "start_date": { + "type": "string", + "title": "Start Date", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.", + "airbyte_secret": false + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-slack.json b/jvm/src/main/resources/airbyte/source-slack.json new file mode 100644 index 0000000..7fce7bb --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-slack.json @@ -0,0 +1,119 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/slack", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Slack Spec", + "type": "object", + "required": ["start_date", "lookback_window", "join_channels"], + "additionalProperties": true, + "properties": { + "start_date": { + "type": "string", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. 
Any data before this date will not be replicated.", + "examples": ["2017-01-25T00:00:00Z"], + "title": "Start Date" + }, + "lookback_window": { + "type": "integer", + "title": "Threads Lookback window (Days)", + "description": "How far into the past to look for messages in threads.", + "examples": [7, 14] + }, + "join_channels": { + "type": "boolean", + "default": true, + "title": "Join all channels", + "description": "Whether to join all channels or to sync data only from channels the bot is already in. If false, you'll need to manually add the bot to all the channels from which you'd like to sync messages. " + }, + "channel_filter": { + "type": "array", + "default": [], + "items": { + "type": "string", + "minLength": 0 + }, + "title": "Channel name filter", + "description": "A channel name list (without leading '#' char) which limit the channels from which you'd like to sync. Empty list means no filter.", + "examples": ["channel_one", "channel_two"] + }, + "credentials": { + "title": "Authentication mechanism", + "description": "Choose how to authenticate into Slack", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "Sign in via Slack (OAuth)", + "required": [ + "access_token", + "client_id", + "client_secret", + "option_title" + ], + "properties": { + "option_title": { + "type": "string", + "const": "Default OAuth2.0 authorization" + }, + "client_id": { + "title": "Client ID", + "description": "Slack client_id. See our docs if you need help finding this id.", + "type": "string", + "examples": ["slack-client-id-example"] + }, + "client_secret": { + "title": "Client Secret", + "description": "Slack client_secret. See our docs if you need help finding this secret.", + "type": "string", + "examples": ["slack-client-secret-example"], + "airbyte_secret": true + }, + "access_token": { + "title": "Access token", + "description": "Slack access_token. See our docs if you need help generating the token.", + "type": "string", + "examples": ["slack-access-token-example"], + "airbyte_secret": true + }, + "refresh_token": { + "title": "Refresh token", + "description": "Slack refresh_token. See our docs if you need help generating the token.", + "type": "string", + "examples": ["slack-refresh-token-example"], + "airbyte_secret": true + } + }, + "order": 0 + }, + { + "type": "object", + "title": "API Token", + "required": ["api_token", "option_title"], + "properties": { + "option_title": { + "type": "string", + "const": "API Token Credentials" + }, + "api_token": { + "type": "string", + "title": "API Token", + "description": "A Slack bot token. 
See the docs for instructions on how to generate it.", + "airbyte_secret": true + } + }, + "order": 1 + } + ] + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": ["credentials", 0], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["access_token"], ["refresh_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-smartsheets.json b/jvm/src/main/resources/airbyte/source-smartsheets.json new file mode 100644 index 0000000..c756aff --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-smartsheets.json @@ -0,0 +1,69 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/smartsheets", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Smartsheets Source Spec", + "type": "object", + "required": ["access_token", "spreadsheet_id"], + "additionalProperties": true, + "properties": { + "access_token": { + "title": "Access Token", + "description": "The access token to use for accessing your data from Smartsheets. This access token must be generated by a user with at least read access to the data you'd like to replicate. Generate an access token in the Smartsheets main menu by clicking Account > Apps & Integrations > API Access. See the setup guide for information on how to obtain this token.", + "type": "string", + "order": 0, + "airbyte_secret": true + }, + "spreadsheet_id": { + "title": "Sheet ID", + "description": "The spreadsheet ID. Find it by opening the spreadsheet then navigating to File > Properties", + "type": "string", + "order": 1 + }, + "start_datetime": { + "title": "Start Datetime (Optional)", + "type": "string", + "examples": ["2000-01-01T13:00:00", "2000-01-01T13:00:00-07:00"], + "description": "Only rows modified after this date/time will be replicated. 
This should be an ISO 8601 string, for instance: `2000-01-01T13:00:00`", + "format": "date-time", + "default": "2020-01-01T00:00:00+00:00", + "order": 2, + "airbyte_hidden": true + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": [], + "predicate_value": "", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "access_token": { + "type": "string", + "path_in_connector_config": ["access_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": {} + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-snapchat-marketing.json b/jvm/src/main/resources/airbyte/source-snapchat-marketing.json new file mode 100644 index 0000000..5b1cf2c --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-snapchat-marketing.json @@ -0,0 +1,57 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/snapchat-marketing", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Snapchat Marketing Spec", + "type": "object", + "required": ["client_id", "client_secret", "refresh_token"], + "properties": { + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Client ID of your Snapchat developer application.", + "airbyte_secret": true, + "order": 0 + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The Client Secret of your Snapchat developer application.", + "airbyte_secret": true, + "order": 1 + }, + "refresh_token": { + "title": "Refresh Token", + "type": "string", + "description": "Refresh Token to renew the expired Access Token.", + "airbyte_secret": true, + "order": 2 + }, + "start_date": { + "title": "Start Date", + "type": "string", + "description": "Date in the format 2022-01-01. Any data before this date will not be replicated.", + "examples": ["2022-01-01"], + "default": "2022-01-01", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$", + "order": 3 + }, + "end_date": { + "type": "string", + "title": "End Date (Optional)", + "description": "Date in the format 2017-01-25. 
Any data after this date will not be replicated.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$", + "examples": ["2022-01-30"], + "order": 4 + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": [], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["refresh_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-snowflake.json b/jvm/src/main/resources/airbyte/source-snowflake.json new file mode 100644 index 0000000..467ddcb --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-snowflake.json @@ -0,0 +1,186 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/snowflake", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Snowflake Source Spec", + "type": "object", + "required": ["host", "role", "warehouse", "database", "schema"], + "properties": { + "credentials": { + "title": "Authorization Method", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "OAuth2.0", + "order": 0, + "required": ["client_id", "client_secret", "auth_type"], + "properties": { + "auth_type": { + "type": "string", + "const": "OAuth", + "default": "OAuth", + "order": 0 + }, + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The Client ID of your Snowflake developer application.", + "airbyte_secret": true, + "order": 1 + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "The Client Secret of your Snowflake developer application.", + "airbyte_secret": true, + "order": 2 + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "Access Token for making authenticated requests.", + "airbyte_secret": true, + "order": 3 + }, + "refresh_token": { + "type": "string", + "title": "Refresh Token", + "description": "Refresh Token for making authenticated requests.", + "airbyte_secret": true, + "order": 4 + } + } + }, + { + "title": "Username and Password", + "type": "object", + "required": ["username", "password", "auth_type"], + "order": 1, + "properties": { + "auth_type": { + "type": "string", + "const": "username/password", + "default": "username/password", + "order": 0 + }, + "username": { + "description": "The username you created to allow Airbyte to access the database.", + "examples": ["AIRBYTE_USER"], + "type": "string", + "title": "Username", + "order": 1 + }, + "password": { + "description": "The password associated with the username.", + "type": "string", + "airbyte_secret": true, + "title": "Password", + "order": 2 + } + } + } + ], + "order": 0 + }, + "host": { + "description": "The host domain of the snowflake instance (must include the account, region, cloud environment, and end with snowflakecomputing.com).", + "examples": ["accountname.us-east-2.aws.snowflakecomputing.com"], + "type": "string", + "title": "Account Name", + "order": 1 + }, + "role": { + "description": "The role you created for Airbyte to access Snowflake.", + "examples": ["AIRBYTE_ROLE"], + "type": "string", + "title": "Role", + "order": 2 + }, + "warehouse": { + "description": "The warehouse you created for Airbyte to access data.", + "examples": ["AIRBYTE_WAREHOUSE"], + "type": "string", + "title": "Warehouse", + "order": 3 + }, + "database": { + "description": "The database you created for Airbyte to access data.", + "examples": ["AIRBYTE_DATABASE"], + "type": "string", + "title": "Database", + "order": 4 + }, + "schema": { + "description": "The 
source Snowflake schema tables.", + "examples": ["AIRBYTE_SCHEMA"], + "type": "string", + "title": "Schema", + "order": 5 + }, + "jdbc_url_params": { + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).", + "title": "JDBC URL Params", + "type": "string", + "order": 6 + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "auth_type"], + "predicate_value": "OAuth", + "oauth_config_specification": { + "oauth_user_input_from_connector_config_specification": { + "type": "object", + "properties": { + "host": { + "type": "string", + "path_in_connector_config": ["host"] + }, + "role": { + "type": "string", + "path_in_connector_config": ["role"] + } + } + }, + "complete_oauth_output_specification": { + "type": "object", + "properties": { + "access_token": { + "type": "string", + "path_in_connector_config": ["credentials", "access_token"] + }, + "refresh_token": { + "type": "string", + "path_in_connector_config": ["credentials", "refresh_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-square.json b/jvm/src/main/resources/airbyte/source-square.json new file mode 100644 index 0000000..f8719f2 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-square.json @@ -0,0 +1,148 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/square", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Square Source CDK Specifications", + "type": "object", + "required": ["is_sandbox"], + "additionalProperties": true, + "properties": { + "is_sandbox": { + "type": "boolean", + "description": "Determines whether to use the sandbox or production environment.", + "title": "Sandbox", + "examples": [true, false], + "default": false + }, + "start_date": { + "type": "string", + "description": "UTC date in the format YYYY-MM-DD. Any data before this date will not be replicated. 
If not set, all data will be replicated.", + "title": "Start Date", + "examples": ["2021-01-01"], + "default": "2021-01-01", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$" + }, + "include_deleted_objects": { + "type": "boolean", + "description": "In some streams there is an option to include deleted objects (Items, Categories, Discounts, Taxes)", + "title": "Include Deleted Objects", + "examples": [true, false], + "default": false + }, + "credentials": { + "type": "object", + "title": "Credential Type", + "oneOf": [ + { + "title": "Oauth authentication", + "type": "object", + "required": [ + "auth_type", + "client_id", + "client_secret", + "refresh_token" + ], + "properties": { + "auth_type": { + "type": "string", + "const": "Oauth", + "enum": ["Oauth"], + "default": "Oauth", + "order": 0 + }, + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Square-issued ID of your application", + "airbyte_secret": true + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The Square-issued application secret for your application", + "airbyte_secret": true + }, + "refresh_token": { + "title": "Refresh Token", + "type": "string", + "description": "A refresh token generated using the above client ID and secret", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "API Key", + "required": ["auth_type", "api_key"], + "properties": { + "auth_type": { + "type": "string", + "const": "Apikey", + "enum": ["Apikey"], + "default": "Apikey", + "order": 1 + }, + "api_key": { + "title": "API key token", + "type": "string", + "description": "The API key for a Square application", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": ["credentials", 0], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["refresh_token"]] + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "auth_type"], + "predicate_value": "Oauth", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "refresh_token": { + "type": "string", + "path_in_connector_config": ["credentials", "refresh_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-strava.json b/jvm/src/main/resources/airbyte/source-strava.json new file mode 100644 index 0000000..5515b58 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-strava.json @@ -0,0 +1,103 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/strava", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Strava Spec", + "type": "object", + "required": [ + "client_id", + "client_secret", + "refresh_token", + "athlete_id", + "start_date" + ], + "additionalProperties": false, + "properties": { + "auth_type": { + 
"type": "string", + "const": "Client", + "enum": ["Client"], + "default": "Client" + }, + "client_id": { + "type": "string", + "description": "The Client ID of your Strava developer application.", + "title": "Client ID", + "pattern": "^[0-9_\\-]+$", + "examples": ["12345"] + }, + "client_secret": { + "type": "string", + "description": "The Client Secret of your Strava developer application.", + "title": "Client Secret", + "pattern": "^[0-9a-fA-F]+$", + "examples": ["fc6243f283e51f6ca989aab298b17da125496f50"], + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "description": "The Refresh Token with the activity: read_all permissions.", + "title": "Refresh Token", + "pattern": "^[0-9a-fA-F]+$", + "examples": ["fc6243f283e51f6ca989aab298b17da125496f50"], + "airbyte_secret": true + }, + "athlete_id": { + "type": "integer", + "description": "The Athlete ID of your Strava developer application.", + "title": "Athlete ID", + "pattern": "^[0-9_\\-]+$", + "examples": ["17831421"] + }, + "start_date": { + "type": "string", + "description": "UTC date and time. Any data before this date will not be replicated.", + "title": "Start Date", + "examples": ["2016-12-31 23:59:59"] + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["auth_type"], + "predicate_value": "Client", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "refresh_token": { + "type": "string", + "path_in_connector_config": ["refresh_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["client_secret"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-surveymonkey.json b/jvm/src/main/resources/airbyte/source-surveymonkey.json new file mode 100644 index 0000000..eb69723 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-surveymonkey.json @@ -0,0 +1,45 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/surveymonkey", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "SurveyMonkey Spec", + "type": "object", + "required": ["access_token", "start_date"], + "additionalProperties": true, + "properties": { + "access_token": { + "title": "Access Token", + "order": 0, + "type": "string", + "airbyte_secret": true, + "description": "Access Token for making authenticated requests. See the docs for information on how to generate this key." + }, + "start_date": { + "title": "Start Date", + "order": 1, + "type": "string", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z?$", + "examples": ["2021-01-01T00:00:00Z"] + }, + "survey_ids": { + "type": "array", + "order": 2, + "items": { + "type": "string", + "pattern": "^[0-9]{8,9}$" + }, + "title": "Survey Monkey survey IDs", + "description": "IDs of the surveys from which you'd like to replicate data. 
If left empty, data from all surveys to which you have access will be replicated." + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": [], + "oauthFlowInitParameters": [], + "oauthFlowOutputParameters": [["access_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-talkdesk-explore.json b/jvm/src/main/resources/airbyte/source-talkdesk-explore.json new file mode 100644 index 0000000..281ebf9 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-talkdesk-explore.json @@ -0,0 +1,43 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Talkdesk Explore API Spec", + "type": "object", + "required": ["start_date", "auth_url", "api_key"], + "additionalProperties": false, + "properties": { + "start_date": { + "type": "string", + "title": "START DATE", + "description": "The date from which you'd like to replicate data for Talkdesk Explore API, in the format YYYY-MM-DDT00:00:00. All data generated after this date will be replicated.", + "examples": ["2020-10-15T00:00:00"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}$", + "order": 0 + }, + "timezone": { + "type": "string", + "title": "TIMEZONE", + "description": "Timezone to use when generating reports. Only IANA timezones are supported (https://nodatime.org/TimeZones)", + "examples": ["Europe/London", "America/Los_Angeles"], + "default": "UTC", + "order": 1 + }, + "auth_url": { + "title": "AUTH URL", + "type": "string", + "description": "Talkdesk Auth URL. Only 'client_credentials' auth type supported at the moment.", + "examples": [ + "https://xxxxxx.talkdeskid.com/oauth/token?grant_type=client_credentials" + ], + "order": 2 + }, + "api_key": { + "title": "API KEY", + "type": "string", + "description": "Talkdesk API key.", + "order": 3 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-tempo.json b/jvm/src/main/resources/airbyte/source-tempo.json new file mode 100644 index 0000000..18e9c13 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-tempo.json @@ -0,0 +1,18 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Tempo Spec", + "type": "object", + "required": ["api_token"], + "additionalProperties": false, + "properties": { + "api_token": { + "type": "string", + "title": "API token", + "description": "Tempo API Token. 
Go to Tempo>Settings, scroll down to Data Access and select API integration.", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-tidb.json b/jvm/src/main/resources/airbyte/source-tidb.json new file mode 100755 index 0000000..79d50a7 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-tidb.json @@ -0,0 +1,59 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/tidb", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "TiDB Source Spec", + "type": "object", + "required": ["host", "port", "database", "username"], + "properties": { + "host": { + "description": "Hostname of the database.", + "title": "Host", + "type": "string", + "order": 0 + }, + "port": { + "description": "Port of the database.", + "title": "Port", + "type": "integer", + "minimum": 0, + "maximum": 65536, + "default": 4000, + "examples": ["4000"], + "order": 1 + }, + "database": { + "description": "Name of the database.", + "title": "Database", + "type": "string", + "order": 2 + }, + "username": { + "description": "Username to use to access the database.", + "title": "Username", + "type": "string", + "order": 3 + }, + "password": { + "description": "Password associated with the username.", + "title": "Password", + "type": "string", + "airbyte_secret": true, + "order": 4 + }, + "jdbc_url_params": { + "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3)", + "title": "JDBC URL Params", + "type": "string", + "order": 5 + }, + "ssl": { + "title": "SSL Connection", + "description": "Encrypt data using SSL.", + "type": "boolean", + "default": false, + "order": 6 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-timely.json b/jvm/src/main/resources/airbyte/source-timely.json new file mode 100644 index 0000000..98b085b --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-timely.json @@ -0,0 +1,29 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Timely Integration Spec", + "type": "object", + "required": ["account_id", "start_date", "bearer_token"], + "additionalProperties": false, + "properties": { + "account_id": { + "title": "account_id", + "type": "string", + "description": "Timely account id" + }, + "start_date": { + "title": "startDate", + "type": "string", + "description": "start date", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$", + "example": "2022-05-06" + }, + "bearer_token": { + "title": "Bearer token", + "type": "string", + "description": "Timely bearer token" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-tplcentral.json b/jvm/src/main/resources/airbyte/source-tplcentral.json new file mode 100644 index 0000000..4127110 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-tplcentral.json @@ -0,0 +1,55 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/tplcentral", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Tplcentral Spec", + "type": "object", + "required": ["url_base", "client_id", "client_secret"], + "additionalProperties": false, + "properties": { + "url_base": { + "title": "URL base", + "type": "string", + "format": "uri", + "default": "https://secure-wms.com/" + }, + "client_id": { + "title": "Client ID", + "type": "string" + }, + 
"client_secret": { + "title": "Client secret", + "type": "string", + "airbyte_secret": true + }, + "user_login_id": { + "title": "User login ID", + "description": "User login ID and/or name is required", + "type": "integer" + }, + "user_login": { + "title": "User login name", + "description": "User login ID and/or name is required", + "type": "string" + }, + "tpl_key": { + "title": "3PL GUID", + "type": "string" + }, + "customer_id": { + "title": "Customer ID", + "type": "integer" + }, + "facility_id": { + "title": "Facility ID", + "type": "integer" + }, + "start_date": { + "title": "Start date", + "description": "Date and time together in RFC 3339 format, for example, 2018-11-13T20:20:39+00:00.", + "type": "string", + "format": "date-time" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-trello.json b/jvm/src/main/resources/airbyte/source-trello.json new file mode 100644 index 0000000..aac6450 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-trello.json @@ -0,0 +1,48 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/trello", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Trello Spec", + "type": "object", + "required": ["token", "key", "start_date"], + "additionalProperties": true, + "properties": { + "token": { + "type": "string", + "title": "API token", + "description": "Trello v API token. See the docs for instructions on how to generate it.", + "airbyte_secret": true + }, + "key": { + "type": "string", + "title": "API key", + "description": "Trello API key. See the docs for instructions on how to generate it.", + "airbyte_secret": true + }, + "start_date": { + "type": "string", + "title": "Start Date", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3}Z$", + "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.", + "examples": ["2021-03-01T00:00:00.000Z"] + }, + "board_ids": { + "type": "array", + "items": { + "type": "string", + "pattern": "^[0-9a-fA-F]{24}$" + }, + "title": "Trello Board IDs", + "description": "IDs of the boards to replicate data from. If left empty, data from all boards to which you have access will be replicated." + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": [], + "oauthFlowInitParameters": [], + "oauthFlowOutputParameters": [["token"], ["key"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-twilio.json b/jvm/src/main/resources/airbyte/source-twilio.json new file mode 100644 index 0000000..4d80f05 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-twilio.json @@ -0,0 +1,44 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/twilio", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Twilio Spec", + "type": "object", + "required": ["account_sid", "auth_token", "start_date"], + "additionalProperties": true, + "properties": { + "account_sid": { + "title": "Account ID", + "description": "Twilio account SID", + "airbyte_secret": true, + "type": "string", + "order": 1 + }, + "auth_token": { + "title": "Auth Token", + "description": "Twilio Auth Token.", + "airbyte_secret": true, + "type": "string", + "order": 2 + }, + "start_date": { + "title": "Replication Start Date", + "description": "UTC date and time in the format 2020-10-01T00:00:00Z. 
Any data before this date will not be replicated.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2020-10-01T00:00:00Z"], + "type": "string", + "order": 3 + }, + "lookback_window": { + "title": "Lookback window", + "description": "How far into the past to look for records. (in minutes)", + "examples": [60], + "default": 0, + "type": "integer", + "order": 4 + } + } + }, + "supportsIncremental": true, + "supported_destination_sync_modes": ["append"] +} diff --git a/jvm/src/main/resources/airbyte/source-typeform.json b/jvm/src/main/resources/airbyte/source-typeform.json new file mode 100644 index 0000000..2d622d7 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-typeform.json @@ -0,0 +1,34 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/typeform", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Typeform Spec", + "type": "object", + "required": ["token", "start_date"], + "additionalProperties": true, + "properties": { + "start_date": { + "type": "string", + "description": "UTC date and time in the format: YYYY-MM-DDTHH:mm:ss[Z]. Any data before this date will not be replicated.", + "title": "Start Date", + "examples": ["2020-01-01T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + }, + "token": { + "type": "string", + "description": "The API Token for a Typeform account.", + "title": "API Token", + "airbyte_secret": true + }, + "form_ids": { + "title": "Form IDs to replicate", + "description": "When this parameter is set, the connector will replicate data only from the input forms. Otherwise, all forms in your Typeform account will be replicated. You can find form IDs in your form URLs. For example, in the URL \"https://mysite.typeform.com/to/u6nXL7\" the form_id is u6nXL7. You can find form URLs on Share panel", + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-us-census.json b/jvm/src/main/resources/airbyte/source-us-census.json new file mode 100644 index 0000000..7028725 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-us-census.json @@ -0,0 +1,36 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/us-census", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "https://api.census.gov/ Source Spec", + "type": "object", + "required": ["api_key", "query_path"], + "additionalProperties": false, + "properties": { + "query_params": { + "type": "string", + "description": "The query parameters portion of the GET request, without the api key", + "pattern": "^\\w+=[\\w,:*]+(&(?!key)\\w+=[\\w,:*]+)*$", + "examples": [ + "get=NAME,NAICS2017_LABEL,LFO_LABEL,EMPSZES_LABEL,ESTAB,PAYANN,PAYQTR1,EMP&for=us:*&NAICS2017=72&LFO=001&EMPSZES=001", + "get=MOVEDIN,GEOID1,GEOID2,MOVEDOUT,FULL1_NAME,FULL2_NAME,MOVEDNET&for=county:*" + ] + }, + "query_path": { + "type": "string", + "description": "The path portion of the GET request", + "pattern": "^data(\\/[\\w\\d]+)+$", + "examples": [ + "data/2019/cbp", + "data/2018/acs", + "data/timeseries/healthins/sahie" + ] + }, + "api_key": { + "type": "string", + "description": "Your API Key. 
Get your key here.", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-woocommerce.json b/jvm/src/main/resources/airbyte/source-woocommerce.json new file mode 100644 index 0000000..34b8ccd --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-woocommerce.json @@ -0,0 +1,42 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/woocommerce", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Woocommerce Source CDK Specifications", + "type": "object", + "required": ["shop", "start_date", "api_key", "api_secret"], + "additionalProperties": false, + "properties": { + "shop": { + "type": "string", + "description": "The name of the store. For https://EXAMPLE.com, the shop name is 'EXAMPLE.com'." + }, + "start_date": { + "type": "string", + "description": "The date you would like to replicate data. Format: YYYY-MM-DD.", + "examples": ["2021-01-01"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$" + }, + "api_key": { + "type": "string", + "description": "The CUSTOMER KEY for API in WooCommerce shop.", + "airbyte_secret": true + }, + "api_secret": { + "type": "string", + "description": "The CUSTOMER SECRET for API in WooCommerce shop.", + "airbyte_secret": true + }, + "conversion_window_days": { + "title": "Conversion Window (Optional)", + "type": "integer", + "description": "A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads.", + "minimum": 0, + "maximum": 1095, + "default": 0, + "examples": [14], + "order": 5 + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-yahoo-finance-price.json b/jvm/src/main/resources/airbyte/source-yahoo-finance-price.json new file mode 100644 index 0000000..f1c981e --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-yahoo-finance-price.json @@ -0,0 +1,56 @@ +{ + "documentationUrl": "https://docsurl.com", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Yahoo Finance Spec", + "type": "object", + "required": ["tickers"], + "additionalProperties": false, + "properties": { + "tickers": { + "type": "string", + "order": 0, + "description": "Comma-separated identifiers for the stocks to be queried. 
Whitespaces are allowed.", + "examples": ["AAPL, GOOGL, GEO.MI"] + }, + "interval": { + "title": "Interval", + "order": 1, + "description": "The interval between prices queried.", + "type": "string", + "enum": [ + "1m", + "5m", + "15m", + "30m", + "90m", + "1h", + "1d", + "5d", + "1wk", + "1mo", + "3mo" + ] + }, + "range": { + "title": "Range", + "order": 2, + "description": "The range of prices to be queried.", + "type": "string", + "enum": [ + "1d", + "5d", + "7d", + "1mo", + "3mo", + "6mo", + "1y", + "2y", + "5y", + "ytd", + "max" + ] + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-youtube-analytics.json b/jvm/src/main/resources/airbyte/source-youtube-analytics.json new file mode 100644 index 0000000..d041343 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-youtube-analytics.json @@ -0,0 +1,46 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/youtube-analytics", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "YouTube Analytics Spec", + "type": "object", + "required": ["credentials"], + "additionalProperties": true, + "properties": { + "credentials": { + "title": "Authenticate via OAuth 2.0", + "type": "object", + "required": ["client_id", "client_secret", "refresh_token"], + "additionalProperties": true, + "properties": { + "client_id": { + "title": "Client ID", + "type": "string", + "description": "The Client ID of your developer application", + "airbyte_secret": true + }, + "client_secret": { + "title": "Client Secret", + "type": "string", + "description": "The client secret of your developer application", + "airbyte_secret": true + }, + "refresh_token": { + "title": "Refresh Token", + "type": "string", + "description": "A refresh token generated using the above client ID and secret", + "airbyte_secret": true + } + } + } + } + }, + "authSpecification": { + "auth_type": "oauth2.0", + "oauth2Specification": { + "rootObject": ["credentials"], + "oauthFlowInitParameters": [["client_id"], ["client_secret"]], + "oauthFlowOutputParameters": [["refresh_token"]] + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-zendesk-chat.json b/jvm/src/main/resources/airbyte/source-zendesk-chat.json new file mode 100644 index 0000000..671096b --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-zendesk-chat.json @@ -0,0 +1,138 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/zendesk-chat", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Zendesk Chat Spec", + "type": "object", + "required": ["start_date"], + "additionalProperties": true, + "properties": { + "start_date": { + "type": "string", + "title": "Start Date", + "description": "The date from which you'd like to replicate data for Zendesk Chat API, in the format YYYY-MM-DDT00:00:00Z.", + "examples": ["2021-02-01T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + }, + "subdomain": { + "type": "string", + "title": "Subdomain (Optional)", + "description": "Required if you access Zendesk Chat from a Zendesk Support subdomain.", + "default": "" + }, + "credentials": { + "title": "Authorization Method", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "OAuth2.0", + "required": ["credentials"], + "properties": { + "credentials": { + "type": "string", + "const": "oauth2.0", + "order": 0 + }, + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The Client ID of your OAuth application", + 
"airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "The Client Secret of your OAuth application.", + "airbyte_secret": true + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "Access Token for making authenticated requests.", + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "title": "Refresh Token", + "description": "Refresh Token to obtain new Access Token, when it's expired.", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "Access Token", + "required": ["credentials", "access_token"], + "properties": { + "credentials": { + "type": "string", + "const": "access_token", + "order": 0 + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "The Access Token to make authenticated requests.", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "credentials"], + "predicate_value": "oauth2.0", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "properties": { + "access_token": { + "type": "string", + "path_in_connector_config": ["credentials", "access_token"] + }, + "refresh_token": { + "type": "string", + "path_in_connector_config": ["credentials", "refresh_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + }, + "oauth_user_input_from_connector_config_specification": { + "type": "object", + "properties": { + "subdomain": { + "type": "string", + "path_in_connector_config": ["subdomain"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-zendesk-sunshine.json b/jvm/src/main/resources/airbyte/source-zendesk-sunshine.json new file mode 100644 index 0000000..03a04f1 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-zendesk-sunshine.json @@ -0,0 +1,145 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/zendesk_sunshine", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Zendesk Sunshine Spec", + "type": "object", + "required": ["start_date", "subdomain"], + "additionalProperties": true, + "properties": { + "subdomain": { + "title": "Subdomain", + "type": "string", + "description": "The subdomain for your Zendesk Account." 
+ }, + "start_date": { + "title": "Start Date", + "type": "string", + "description": "The date from which you'd like to replicate data for Zendesk Sunshine API, in the format YYYY-MM-DDT00:00:00Z.", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$", + "examples": ["2021-01-01T00:00:00Z"] + }, + "credentials": { + "title": "Authorization Method", + "type": "object", + "oneOf": [ + { + "type": "object", + "title": "OAuth2.0", + "required": [ + "auth_method", + "client_id", + "client_secret", + "access_token" + ], + "properties": { + "auth_method": { + "type": "string", + "const": "oauth2.0", + "enum": ["oauth2.0"], + "default": "oauth2.0", + "order": 0 + }, + "client_id": { + "type": "string", + "title": "Client ID", + "description": "The Client ID of your OAuth application.", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "The Client Secret of your OAuth application.", + "airbyte_secret": true + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "Long-term access Token for making authenticated requests.", + "airbyte_secret": true + } + } + }, + { + "type": "object", + "title": "API Token", + "required": ["auth_method", "api_token", "email"], + "properties": { + "auth_method": { + "type": "string", + "const": "api_token", + "enum": ["api_token"], + "default": "api_token", + "order": 1 + }, + "api_token": { + "type": "string", + "title": "API Token", + "description": "API Token. See the docs for information on how to generate this key.", + "airbyte_secret": true + }, + "email": { + "type": "string", + "title": "Email", + "description": "The user email for your Zendesk account" + } + } + } + ] + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "auth_method"], + "predicate_value": "oauth2.0", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "access_token": { + "type": "string", + "path_in_connector_config": ["credentials", "access_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + }, + "oauth_user_input_from_connector_config_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "subdomain": { + "type": "string", + "path_in_connector_config": ["subdomain"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-zendesk-support.json b/jvm/src/main/resources/airbyte/source-zendesk-support.json new file mode 100644 index 0000000..ede42b4 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-zendesk-support.json @@ -0,0 +1,127 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/zendesk-support", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source Zendesk Support Spec", + "type": "object", + "required": ["start_date", "subdomain"], + "additionalProperties": true, + "properties": { + 
"start_date": { + "type": "string", + "title": "Start Date", + "description": "The date from which you'd like to replicate data for Zendesk Support API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.", + "examples": ["2020-10-15T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + }, + "subdomain": { + "type": "string", + "title": "Subdomain", + "description": "This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain." + }, + "credentials": { + "title": "Authentication *", + "type": "object", + "description": "Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`.", + "oneOf": [ + { + "title": "OAuth2.0", + "type": "object", + "required": ["access_token"], + "additionalProperties": true, + "properties": { + "credentials": { + "type": "string", + "const": "oauth2.0", + "order": 0 + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "The value of the API token generated. See the docs for more information.", + "airbyte_secret": true + } + } + }, + { + "title": "API Token", + "type": "object", + "required": ["email", "api_token"], + "additionalProperties": true, + "properties": { + "credentials": { + "type": "string", + "const": "api_token", + "order": 0 + }, + "email": { + "title": "Email", + "type": "string", + "description": "The user email for your Zendesk account." + }, + "api_token": { + "title": "API Token", + "type": "string", + "description": "The value of the API token generated. See the docs for more information.", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "credentials"], + "predicate_value": "oauth2.0", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "access_token": { + "type": "string", + "path_in_connector_config": ["credentials", "access_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + }, + "oauth_user_input_from_connector_config_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "subdomain": { + "type": "string", + "path_in_connector_config": ["subdomain"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-zendesk-talk.json b/jvm/src/main/resources/airbyte/source-zendesk-talk.json new file mode 100644 index 0000000..c4c0e36 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-zendesk-talk.json @@ -0,0 +1,125 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/zendesk-talk", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source Zendesk Talk Spec", + "type": "object", + "required": ["start_date", "subdomain"], + "properties": { + "start_date": { + "type": "string", 
+ "title": "Start Date", + "description": "The date from which you'd like to replicate data for Zendesk Talk API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.", + "examples": ["2020-10-15T00:00:00Z"], + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$" + }, + "subdomain": { + "type": "string", + "title": "Subdomain", + "description": "This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain." + }, + "credentials": { + "title": "Authentication", + "type": "object", + "description": "Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`.", + "oneOf": [ + { + "title": "API Token", + "type": "object", + "required": ["email", "api_token"], + "additionalProperties": true, + "properties": { + "auth_type": { + "type": "string", + "const": "api_token" + }, + "email": { + "title": "Email", + "type": "string", + "description": "The user email for your Zendesk account." + }, + "api_token": { + "title": "API Token", + "type": "string", + "description": "The value of the API token generated. See the docs for more information.", + "airbyte_secret": true + } + } + }, + { + "title": "OAuth2.0", + "type": "object", + "required": ["access_token"], + "additionalProperties": true, + "properties": { + "auth_type": { + "type": "string", + "const": "oauth2.0", + "order": 0 + }, + "access_token": { + "type": "string", + "title": "Access Token", + "description": "The value of the API token generated. See the docs for more information.", + "airbyte_secret": true + } + } + } + ] + } + } + }, + "advanced_auth": { + "auth_flow_type": "oauth2.0", + "predicate_key": ["credentials", "auth_type"], + "predicate_value": "oauth2.0", + "oauth_config_specification": { + "complete_oauth_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "access_token": { + "type": "string", + "path_in_connector_config": ["credentials", "access_token"] + } + } + }, + "complete_oauth_server_input_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string" + }, + "client_secret": { + "type": "string" + } + } + }, + "complete_oauth_server_output_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "path_in_connector_config": ["credentials", "client_id"] + }, + "client_secret": { + "type": "string", + "path_in_connector_config": ["credentials", "client_secret"] + } + } + }, + "oauth_user_input_from_connector_config_specification": { + "type": "object", + "additionalProperties": false, + "properties": { + "subdomain": { + "type": "string", + "path_in_connector_config": ["subdomain"] + } + } + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-zenloop.json b/jvm/src/main/resources/airbyte/source-zenloop.json new file mode 100644 index 0000000..121a615 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-zenloop.json @@ -0,0 +1,32 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/zenloop", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Zenloop Spec", + "type": "object", + "required": ["api_token"], + "additionalProperties": false, + "properties": { + "api_token": { + "type": "string", + "description": "Zenloop API Token. 
You can get the API token in settings page here ", + "airbyte_secret": true + }, + "date_from": { + "type": "string", + "description": "Zenloop date_from. Format: 2021-10-24T03:30:30Z or 2021-10-24. Leave empty if only data from current data should be synced", + "examples": ["2021-10-24T03:30:30Z"] + }, + "survey_id": { + "type": "string", + "description": "Zenloop Survey ID. Can be found here. Leave empty to pull answers from all surveys", + "airbyte_secret": true + }, + "survey_group_id": { + "type": "string", + "description": "Zenloop Survey Group ID. Can be found by pulling All Survey Groups via SurveyGroups stream. Leave empty to pull answers from all survey groups", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-zoho-crm.json b/jvm/src/main/resources/airbyte/source-zoho-crm.json new file mode 100644 index 0000000..fddfb45 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-zoho-crm.json @@ -0,0 +1,69 @@ +{ + "documentationUrl": "https://docs.airbyte.com/integrations/sources/zoho-crm", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Zoho Crm Configuration", + "type": "object", + "required": [ + "client_id", + "client_secret", + "refresh_token", + "environment", + "dc_region", + "edition" + ], + "additionalProperties": false, + "properties": { + "client_id": { + "type": "string", + "title": "Client ID", + "description": "OAuth2.0 Client ID", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "OAuth2.0 Client Secret", + "airbyte_secret": true + }, + "refresh_token": { + "type": "string", + "title": "Refresh Token", + "description": "OAuth2.0 Refresh Token", + "airbyte_secret": true + }, + "dc_region": { + "title": "Data Center Location", + "type": "string", + "description": "Please choose the region of your Data Center location. More info by this Link", + "enum": ["US", "AU", "EU", "IN", "CN", "JP"] + }, + "environment": { + "title": "Environment", + "type": "string", + "description": "Please choose the environment", + "enum": ["Production", "Developer", "Sandbox"] + }, + "start_datetime": { + "title": "Start Date", + "type": ["null", "string"], + "examples": [ + "2000-01-01", + "2000-01-01 13:00", + "2000-01-01 13:00:00", + "2000-01-01T13:00+00:00", + "2000-01-01T13:00:00-07:00" + ], + "description": "ISO 8601, for instance: `YYYY-MM-DD`, `YYYY-MM-DD HH:MM:SS+HH:MM`", + "format": "date-time" + }, + "edition": { + "title": "Zoho CRM Edition", + "type": "string", + "description": "Choose your Edition of Zoho CRM to determine API Concurrency Limits", + "enum": ["Free", "Standard", "Professional", "Enterprise", "Ultimate"], + "default": "Free" + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-zoom-singer.json b/jvm/src/main/resources/airbyte/source-zoom-singer.json new file mode 100644 index 0000000..d1cd601 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-zoom-singer.json @@ -0,0 +1,18 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/zoom", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Source Zoom Singer Spec", + "type": "object", + "required": ["jwt"], + "additionalProperties": false, + "properties": { + "jwt": { + "title": "JWT Token", + "type": "string", + "description": "Zoom JWT Token. 
See the docs for more information on how to obtain this key.", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/airbyte/source-zuora.json b/jvm/src/main/resources/airbyte/source-zuora.json new file mode 100644 index 0000000..dbf8498 --- /dev/null +++ b/jvm/src/main/resources/airbyte/source-zuora.json @@ -0,0 +1,66 @@ +{ + "documentationUrl": "https://docs.airbyte.io/integrations/sources/zuora", + "connectionSpecification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Zuora Connector Configuration", + "type": "object", + "required": [ + "start_date", + "tenant_endpoint", + "data_query", + "client_id", + "client_secret" + ], + "properties": { + "start_date": { + "type": "string", + "title": "Start Date", + "description": "Start Date in format: YYYY-MM-DD", + "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$" + }, + "window_in_days": { + "type": "string", + "title": "Query Window (in days)", + "description": "The amount of days for each data-chunk begining from start_date. Bigger the value - faster the fetch. (0.1 - as for couple of hours, 1 - as for a Day; 364 - as for a Year).", + "examples": ["0.5", "1", "30", "60", "90", "120", "200", "364"], + "pattern": "^(0|[1-9]\\d*)(\\.\\d+)?$", + "default": "90" + }, + "tenant_endpoint": { + "title": "Tenant Endpoint Location", + "type": "string", + "description": "Please choose the right endpoint where your Tenant is located. More info by this Link", + "enum": [ + "US Production", + "US Cloud Production", + "US API Sandbox", + "US Cloud API Sandbox", + "US Central Sandbox", + "US Performance Test", + "EU Production", + "EU API Sandbox", + "EU Central Sandbox" + ] + }, + "data_query": { + "title": "Data Query Type", + "type": "string", + "description": "Choose between `Live`, or `Unlimited` - the optimized, replicated database at 12 hours freshness for high volume extraction Link", + "enum": ["Live", "Unlimited"], + "default": "Live" + }, + "client_id": { + "type": "string", + "title": "Client ID", + "description": "Your OAuth user Client ID", + "airbyte_secret": true + }, + "client_secret": { + "type": "string", + "title": "Client Secret", + "description": "Your OAuth user Client Secret", + "airbyte_secret": true + } + } + } +} diff --git a/jvm/src/main/resources/argo/clusterrolebindings/Argo.yml b/jvm/src/main/resources/argo/clusterrolebindings/Argo.yml new file mode 100644 index 0000000..499b510 --- /dev/null +++ b/jvm/src/main/resources/argo/clusterrolebindings/Argo.yml @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: argo-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-cluster-role +subjects: + - kind: ServiceAccount + name: argo + namespace: argo \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterrolebindings/ArgoEvents.yml b/jvm/src/main/resources/argo/clusterrolebindings/ArgoEvents.yml new file mode 100644 index 0000000..df4436c --- /dev/null +++ b/jvm/src/main/resources/argo/clusterrolebindings/ArgoEvents.yml @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: argo-events-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-events-role +subjects: + - kind: ServiceAccount + name: argo-events-sa + namespace: argo \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterrolebindings/ArgoRollouts.yml 
b/jvm/src/main/resources/argo/clusterrolebindings/ArgoRollouts.yml new file mode 100644 index 0000000..b45a271 --- /dev/null +++ b/jvm/src/main/resources/argo/clusterrolebindings/ArgoRollouts.yml @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: rollouts-controller + app.kubernetes.io/name: argo-rollouts-clusterrolebinding + app.kubernetes.io/part-of: argo-rollouts + name: argo-rollouts-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-rollouts-clusterrole +subjects: + - kind: ServiceAccount + name: argo-rollouts + namespace: argo \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterrolebindings/ArgoServer.yml b/jvm/src/main/resources/argo/clusterrolebindings/ArgoServer.yml new file mode 100644 index 0000000..c0c1615 --- /dev/null +++ b/jvm/src/main/resources/argo/clusterrolebindings/ArgoServer.yml @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: argo-server-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-server-cluster-role +subjects: + - kind: ServiceAccount + name: argo-server + namespace: argo \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterroles/Argo.yml b/jvm/src/main/resources/argo/clusterroles/Argo.yml new file mode 100644 index 0000000..7879ceb --- /dev/null +++ b/jvm/src/main/resources/argo/clusterroles/Argo.yml @@ -0,0 +1,81 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: argo-cluster-role +rules: + - apiGroups: + - "" + resources: + - pods + - pods/exec + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - create + - delete + - apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + verbs: + - get + - list + - watch + - update + - patch + - delete + - create + - apiGroups: + - argoproj.io + resources: + - workflowtemplates + - workflowtemplates/finalizers + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list + - apiGroups: + - argoproj.io + resources: + - cronworkflows + - cronworkflows/finalizers + verbs: + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterroles/ArgoAggregateToAdmin.yml b/jvm/src/main/resources/argo/clusterroles/ArgoAggregateToAdmin.yml new file mode 100644 index 0000000..ecda1c2 --- /dev/null +++ b/jvm/src/main/resources/argo/clusterroles/ArgoAggregateToAdmin.yml @@ -0,0 +1,26 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: argo-aggregate-to-admin +rules: + - apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterroles/ArgoAggregateToEdit.yml 
b/jvm/src/main/resources/argo/clusterroles/ArgoAggregateToEdit.yml new file mode 100644 index 0000000..d39c299 --- /dev/null +++ b/jvm/src/main/resources/argo/clusterroles/ArgoAggregateToEdit.yml @@ -0,0 +1,26 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: argo-aggregate-to-edit +rules: + - apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterroles/ArgoAggregateToView.yml b/jvm/src/main/resources/argo/clusterroles/ArgoAggregateToView.yml new file mode 100644 index 0000000..3469014 --- /dev/null +++ b/jvm/src/main/resources/argo/clusterroles/ArgoAggregateToView.yml @@ -0,0 +1,21 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: argo-aggregate-to-view +rules: + - apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + verbs: + - get + - list + - watch \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterroles/ArgoEvents.yml b/jvm/src/main/resources/argo/clusterroles/ArgoEvents.yml new file mode 100644 index 0000000..84c62af --- /dev/null +++ b/jvm/src/main/resources/argo/clusterroles/ArgoEvents.yml @@ -0,0 +1,84 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: argo-events-role +rules: + - apiGroups: + - apiextensions.k8s.io + - apiextensions.k8s.io/v1beta1 + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + resources: + - customresourcedefinitions + - apiGroups: + - argoproj.io + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + resources: + - workflows + - workflows/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - gateways + - gateways/finalizers + - sensors + - sensors/finalizers + - eventsources + - eventsources/finalizers + - apiGroups: + - "" + resources: + - pods + - pods/exec + - configmaps + - secrets + - services + - events + - persistentvolumeclaims + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "batch" + resources: + - jobs + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "apps" + resources: + - deployments + verbs: + - create + - get + - list + - watch + - update + - patch + - delete \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterroles/ArgoEventsAggregateToAdmin.yml b/jvm/src/main/resources/argo/clusterroles/ArgoEventsAggregateToAdmin.yml new file mode 100644 index 0000000..a3e3041 --- /dev/null +++ b/jvm/src/main/resources/argo/clusterroles/ArgoEventsAggregateToAdmin.yml @@ -0,0 +1,24 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: argo-events-aggregate-to-admin +rules: + - apiGroups: + - argoproj.io + resources: + - gateways + - gateways/finalizers + - sensors + - sensors/finalizers + verbs: + - create + - delete + - 
deletecollection + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterroles/ArgoEventsAggregateToEdit.yml b/jvm/src/main/resources/argo/clusterroles/ArgoEventsAggregateToEdit.yml new file mode 100644 index 0000000..6c51aa7 --- /dev/null +++ b/jvm/src/main/resources/argo/clusterroles/ArgoEventsAggregateToEdit.yml @@ -0,0 +1,24 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: argo-events-aggregate-to-edit +rules: + - apiGroups: + - argoproj.io + resources: + - gateways + - gateways/finalizers + - sensors + - sensors/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterroles/ArgoEventsAggregateToView.yml b/jvm/src/main/resources/argo/clusterroles/ArgoEventsAggregateToView.yml new file mode 100644 index 0000000..32093a7 --- /dev/null +++ b/jvm/src/main/resources/argo/clusterroles/ArgoEventsAggregateToView.yml @@ -0,0 +1,19 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: argo-events-aggregate-to-view +rules: + - apiGroups: + - argoproj.io + resources: + - gateways + - gateways/finalizers + - sensors + - sensors/finalizers + verbs: + - get + - list + - watch \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterroles/ArgoRollouts.yml b/jvm/src/main/resources/argo/clusterroles/ArgoRollouts.yml new file mode 100644 index 0000000..2d0cb3a --- /dev/null +++ b/jvm/src/main/resources/argo/clusterroles/ArgoRollouts.yml @@ -0,0 +1,90 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: rollouts-controller + app.kubernetes.io/name: argo-rollouts-clusterrole + app.kubernetes.io/part-of: argo-rollouts + name: argo-rollouts-clusterrole +rules: + - apiGroups: + - apps + resources: + - replicasets + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - patch + - apiGroups: + - argoproj.io + resources: + - rollouts + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - argoproj.io + resources: + - analysisruns + - experiments + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - argoproj.io + resources: + - analysistemplates + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - update + - patch + - apiGroups: + - networking.istio.io + resources: + - virtualservices + verbs: + - watch + - get + - update \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterroles/ArgoRolloutsAggregateToAdmin.yml b/jvm/src/main/resources/argo/clusterroles/ArgoRolloutsAggregateToAdmin.yml new file mode 100644 index 0000000..55c868d --- /dev/null +++ b/jvm/src/main/resources/argo/clusterroles/ArgoRolloutsAggregateToAdmin.yml @@ -0,0 +1,27 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: aggregate-cluster-role + app.kubernetes.io/name: argo-rollouts-aggregate-to-admin + 
app.kubernetes.io/part-of: argo-rollouts + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: argo-rollouts-aggregate-to-admin +rules: + - apiGroups: + - argoproj.io + resources: + - rollouts + - experiments + - analysistemplates + - analysisruns + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterroles/ArgoRolloutsAggregateToEdit.yml b/jvm/src/main/resources/argo/clusterroles/ArgoRolloutsAggregateToEdit.yml new file mode 100644 index 0000000..eaf40ff --- /dev/null +++ b/jvm/src/main/resources/argo/clusterroles/ArgoRolloutsAggregateToEdit.yml @@ -0,0 +1,27 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: aggregate-cluster-role + app.kubernetes.io/name: argo-rollouts-aggregate-to-edit + app.kubernetes.io/part-of: argo-rollouts + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: argo-rollouts-aggregate-to-edit +rules: + - apiGroups: + - argoproj.io + resources: + - rollouts + - experiments + - analysistemplates + - analysisruns + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterroles/ArgoRolloutsAggregateToView.yml b/jvm/src/main/resources/argo/clusterroles/ArgoRolloutsAggregateToView.yml new file mode 100644 index 0000000..b6dc8ae --- /dev/null +++ b/jvm/src/main/resources/argo/clusterroles/ArgoRolloutsAggregateToView.yml @@ -0,0 +1,22 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: aggregate-cluster-role + app.kubernetes.io/name: argo-rollouts-aggregate-to-view + app.kubernetes.io/part-of: argo-rollouts + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: argo-rollouts-aggregate-to-view +rules: + - apiGroups: + - argoproj.io + resources: + - rollouts + - experiments + - analysistemplates + - analysisruns + verbs: + - get + - list + - watch \ No newline at end of file diff --git a/jvm/src/main/resources/argo/clusterroles/ArgoServer.yml b/jvm/src/main/resources/argo/clusterroles/ArgoServer.yml new file mode 100644 index 0000000..e4302b4 --- /dev/null +++ b/jvm/src/main/resources/argo/clusterroles/ArgoServer.yml @@ -0,0 +1,51 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: argo-server-cluster-role +rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - apiGroups: + - "" + resources: + - pods + - pods/exec + - pods/log + verbs: + - get + - list + - watch + - delete + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - apiGroups: + - argoproj.io + resources: + - workflows + - workflowtemplates + - cronworkflows + verbs: + - create + - get + - list + - watch + - update + - patch + - delete \ No newline at end of file diff --git a/jvm/src/main/resources/argo/configmaps/GatewayController.yml b/jvm/src/main/resources/argo/configmaps/GatewayController.yml new file mode 100644 index 0000000..3716631 --- /dev/null +++ b/jvm/src/main/resources/argo/configmaps/GatewayController.yml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: gateway-controller-configmap + namespace: argo +data: + config: | + instanceID: argo-events + namespace: argo \ No newline at end of file diff --git 
a/jvm/src/main/resources/argo/configmaps/SensorController.yml b/jvm/src/main/resources/argo/configmaps/SensorController.yml new file mode 100644 index 0000000..503d427 --- /dev/null +++ b/jvm/src/main/resources/argo/configmaps/SensorController.yml @@ -0,0 +1,13 @@ +--- +# The sensor-controller configmap includes configuration information for the sensor-controller +# To watch sensors created in different namespace than the controller is deployed in, remove the namespace: argo. +# Similarly to watch sensors created in specific namespace, change to namespace: +apiVersion: v1 +kind: ConfigMap +metadata: + name: sensor-controller-configmap + namespace: argo +data: + config: | + instanceID: argo-events + namespace: argo \ No newline at end of file diff --git a/jvm/src/main/resources/argo/configmaps/WorkflowController.yml b/jvm/src/main/resources/argo/configmaps/WorkflowController.yml new file mode 100644 index 0000000..925512e --- /dev/null +++ b/jvm/src/main/resources/argo/configmaps/WorkflowController.yml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: workflow-controller-configmap \ No newline at end of file diff --git a/jvm/src/main/resources/argo/customresourcedefinitions/AnalysisRuns.yml b/jvm/src/main/resources/argo/customresourcedefinitions/AnalysisRuns.yml new file mode 100644 index 0000000..cc176e5 --- /dev/null +++ b/jvm/src/main/resources/argo/customresourcedefinitions/AnalysisRuns.yml @@ -0,0 +1,2727 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: analysisruns.argoproj.io +spec: + additionalPrinterColumns: + - JSONPath: .status.phase + description: AnalysisRun status + name: Status + type: string + group: argoproj.io + names: + kind: AnalysisRun + listKind: AnalysisRunList + plural: analysisruns + shortNames: + - ar + singular: analysisrun + scope: Namespaced + subresources: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + args: + items: + properties: + name: + type: string + value: + type: string + required: + - name + type: object + type: array + metrics: + items: + properties: + consecutiveErrorLimit: + format: int32 + type: integer + count: + format: int32 + type: integer + failureCondition: + type: string + failureLimit: + format: int32 + type: integer + inconclusiveLimit: + format: int32 + type: integer + initialDelay: + type: string + interval: + type: string + name: + type: string + provider: + properties: + job: + properties: + metadata: + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + backoffLimit: + format: int32 + type: integer + completions: + format: int32 + type: integer + manualSelector: + type: boolean + parallelism: + format: int32 + type: integer + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + template: + properties: + metadata: + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + 
items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + type: 
boolean + containers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: 
object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + dnsPolicy: + type: string + enableServiceLinks: + type: boolean + ephemeralContainers: + items: + properties: + 
args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + 
properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + targetContainerName: + type: string + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + hostAliases: + items: + properties: + hostnames: + items: + type: string + type: array + ip: + type: string + type: object + type: array + hostIPC: + type: boolean + hostNetwork: + type: boolean + hostPID: + type: boolean + hostname: + type: string + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + initContainers: + items: + 
properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + 
readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + nodeName: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + overhead: + additionalProperties: + type: string + type: object + preemptionPolicy: + type: string + priority: + format: int32 + type: integer + priorityClassName: + type: string + readinessGates: + items: + properties: + conditionType: + type: string + required: + - conditionType + type: object 
+ type: array + restartPolicy: + type: string + runtimeClassName: + type: string + schedulerName: + type: string + securityContext: + properties: + fsGroup: + format: int64 + type: integer + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + serviceAccountName: + type: string + shareProcessNamespace: + type: boolean + subdomain: + type: string + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + maxSkew: + format: int32 + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeID: + type: string + required: + - volumeID + type: object + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + type: string + type: object + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: 
array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeName: + 
type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + ttlSecondsAfterFinished: + format: int32 + type: integer + required: + - template + type: object + required: + - spec + type: object + kayenta: + properties: + address: + type: string + application: + type: string + canaryConfigName: + type: string + configurationAccountName: + type: string + metricsAccountName: + type: string + scopes: + items: + properties: + controlScope: + properties: + end: + type: string + region: + type: string + scope: + type: string + start: + type: string + step: + type: integer + required: + - end + - region + - scope + - start + - step + type: object + experimentScope: + properties: + end: + type: string + region: + type: string + scope: + type: string + start: + type: string + step: + type: integer + required: + - end + - region + - scope + - start + - step + type: object + name: + type: string + required: + - controlScope + - experimentScope + - name + type: object + type: array + storageAccountName: + type: string + threshold: + properties: + marginal: + type: integer + pass: + type: integer + required: + - marginal + - pass + type: object + required: + - address + - application + - canaryConfigName + - configurationAccountName + - metricsAccountName + - scopes + - storageAccountName + - threshold + type: object + prometheus: + properties: + address: + type: string + query: + type: string + type: object + wavefront: + properties: + address: + type: string + query: + type: string + type: object + web: + properties: + headers: + items: + properties: + key: + type: string + value: + type: string + required: + - key + - value + type: object + type: array + jsonPath: + type: string + timeoutSeconds: + type: integer + url: + type: string + required: + - jsonPath + - url + type: object + type: object + successCondition: + type: string + required: + - name + - provider + type: object + type: array + terminate: + type: boolean + required: + - metrics + type: object + status: + properties: + message: + type: string + metricResults: + items: + properties: + consecutiveError: + format: int32 + type: integer + count: + format: int32 + type: integer + error: + format: int32 + type: integer + failed: + format: int32 + type: integer + inconclusive: + format: int32 + type: integer + measurements: + items: + properties: + finishedAt: + format: date-time + type: string + message: + type: string + metadata: + additionalProperties: + type: string + type: object + phase: + type: string + resumeAt: + format: date-time + type: string + startedAt: + format: date-time + type: string + value: + type: string + required: + - phase + type: object + type: array + message: + type: string + name: + type: string + phase: + type: string + successful: + format: int32 + type: integer + required: + - name + - phase + type: object + type: array + phase: + type: string + startedAt: + format: date-time + type: string + required: + - phase + type: object + required: + - spec + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true diff --git a/jvm/src/main/resources/argo/customresourcedefinitions/AnalysisTemplates.yml b/jvm/src/main/resources/argo/customresourcedefinitions/AnalysisTemplates.yml new 
file mode 100644 index 0000000..b6db760 --- /dev/null +++ b/jvm/src/main/resources/argo/customresourcedefinitions/AnalysisTemplates.yml @@ -0,0 +1,2649 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: analysistemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: AnalysisTemplate + listKind: AnalysisTemplateList + plural: analysistemplates + shortNames: + - at + singular: analysistemplate + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + args: + items: + properties: + name: + type: string + value: + type: string + required: + - name + type: object + type: array + metrics: + items: + properties: + consecutiveErrorLimit: + format: int32 + type: integer + count: + format: int32 + type: integer + failureCondition: + type: string + failureLimit: + format: int32 + type: integer + inconclusiveLimit: + format: int32 + type: integer + initialDelay: + type: string + interval: + type: string + name: + type: string + provider: + properties: + job: + properties: + metadata: + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + backoffLimit: + format: int32 + type: integer + completions: + format: int32 + type: integer + manualSelector: + type: boolean + parallelism: + format: int32 + type: integer + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + template: + properties: + metadata: + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + 
additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + type: boolean + containers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: 
+ items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + 
type: string + type: + type: string + user: + type: string + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + dnsPolicy: + type: string + enableServiceLinks: + type: boolean + ephemeralContainers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + 
value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + 
type: string + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + targetContainerName: + type: string + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + hostAliases: + items: + properties: + hostnames: + items: + type: string + type: array + ip: + type: string + type: object + type: array + hostIPC: + type: boolean + hostNetwork: + type: boolean + hostPID: + type: boolean + hostname: + type: string + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + initContainers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: 
string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + 
user: + type: string + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + nodeName: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + overhead: + additionalProperties: + type: string + type: object + preemptionPolicy: + type: string + priority: + format: int32 + type: integer + priorityClassName: + type: string + readinessGates: + items: + properties: + conditionType: + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + type: string + runtimeClassName: + type: string + schedulerName: + type: string + securityContext: + properties: + fsGroup: + format: int64 + type: integer + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + serviceAccountName: + type: string + shareProcessNamespace: + type: boolean + subdomain: + type: string + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: 
+ type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + maxSkew: + format: int32 + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeID: + type: string + required: + - volumeID + type: object + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + type: string + type: object + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + targetPortal: + type: string 
+ required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + ttlSecondsAfterFinished: + format: int32 + type: integer + required: + - template + type: object + required: + - spec + type: object + kayenta: + properties: + address: + type: string + application: + type: string + canaryConfigName: + type: string + configurationAccountName: + type: string + metricsAccountName: + type: string + scopes: + items: + properties: + controlScope: + properties: + end: + type: string + region: + type: string + scope: + type: string + start: + type: string + step: + type: integer + required: + - end + - region + - scope + - start + - step + type: object + experimentScope: + properties: + end: + type: string + region: + type: string + scope: + type: string + start: + type: string + step: + type: integer + required: + - end + - region + - scope + - start + - step + type: object + name: + type: string + required: + - controlScope + - experimentScope + - name + type: object + type: array + storageAccountName: + type: string + threshold: + properties: + marginal: + type: integer + pass: + type: integer + required: + - 
marginal + - pass + type: object + required: + - address + - application + - canaryConfigName + - configurationAccountName + - metricsAccountName + - scopes + - storageAccountName + - threshold + type: object + prometheus: + properties: + address: + type: string + query: + type: string + type: object + wavefront: + properties: + address: + type: string + query: + type: string + type: object + web: + properties: + headers: + items: + properties: + key: + type: string + value: + type: string + required: + - key + - value + type: object + type: array + jsonPath: + type: string + timeoutSeconds: + type: integer + url: + type: string + required: + - jsonPath + - url + type: object + type: object + successCondition: + type: string + required: + - name + - provider + type: object + type: array + required: + - metrics + type: object + required: + - spec + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true \ No newline at end of file diff --git a/jvm/src/main/resources/argo/customresourcedefinitions/CronWorkflows.yml b/jvm/src/main/resources/argo/customresourcedefinitions/CronWorkflows.yml new file mode 100644 index 0000000..432d779 --- /dev/null +++ b/jvm/src/main/resources/argo/customresourcedefinitions/CronWorkflows.yml @@ -0,0 +1,16 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: cronworkflows.argoproj.io +spec: + group: argoproj.io + names: + kind: CronWorkflow + plural: cronworkflows + singular: cronworkflow + shortNames: + - cronwf + - cwf + scope: Namespaced + version: "v1alpha1" \ No newline at end of file diff --git a/jvm/src/main/resources/argo/customresourcedefinitions/EventSources.yml b/jvm/src/main/resources/argo/customresourcedefinitions/EventSources.yml new file mode 100644 index 0000000..0f8e46a --- /dev/null +++ b/jvm/src/main/resources/argo/customresourcedefinitions/EventSources.yml @@ -0,0 +1,16 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: eventsources.argoproj.io +spec: + group: argoproj.io + scope: Namespaced + names: + kind: EventSource + plural: eventsources + singular: eventsource + listKind: EventSourceList + shortNames: + - es + version: "v1alpha1" \ No newline at end of file diff --git a/jvm/src/main/resources/argo/customresourcedefinitions/Experiments.yml b/jvm/src/main/resources/argo/customresourcedefinitions/Experiments.yml new file mode 100644 index 0000000..1bdc254 --- /dev/null +++ b/jvm/src/main/resources/argo/customresourcedefinitions/Experiments.yml @@ -0,0 +1,2598 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: experiments.argoproj.io +spec: + additionalPrinterColumns: + - JSONPath: .status.phase + description: Experiment status + name: Status + type: string + group: argoproj.io + names: + kind: Experiment + listKind: ExperimentList + plural: experiments + shortNames: + - exp + singular: experiment + scope: Namespaced + subresources: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + analyses: + items: + properties: + args: + items: + properties: + name: + type: string + value: + type: string + required: + - name + type: object + type: array + name: + type: string + requiredForCompletion: + type: boolean + templateName: + type: string + required: + - name + - templateName + type: object + type: array + duration: + type: string + progressDeadlineSeconds: + 
format: int32 + type: integer + templates: + items: + properties: + minReadySeconds: + format: int32 + type: integer + name: + type: string + replicas: + format: int32 + type: integer + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + template: + properties: + metadata: + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + 
additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + type: boolean + containers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: 
string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: 
string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + dnsPolicy: + type: string + enableServiceLinks: + type: boolean + ephemeralContainers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + 
name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + targetContainerName: + type: string + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: 
+ type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + hostAliases: + items: + properties: + hostnames: + items: + type: string + type: array + ip: + type: string + type: object + type: array + hostIPC: + type: boolean + hostNetwork: + type: boolean + hostPID: + type: boolean + hostname: + type: string + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + initContainers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + 
properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + 
volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + nodeName: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + overhead: + additionalProperties: + type: string + type: object + preemptionPolicy: + type: string + priority: + format: int32 + type: integer + priorityClassName: + type: string + readinessGates: + items: + properties: + conditionType: + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + type: string + runtimeClassName: + type: string + schedulerName: + type: string + securityContext: + properties: + fsGroup: + format: int64 + type: integer + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + serviceAccountName: + type: string + shareProcessNamespace: + type: boolean + subdomain: + type: string + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + maxSkew: + format: int32 + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + 
properties: + name: + type: string + type: object + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeID: + type: string + required: + - volumeID + type: object + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + type: string + type: object + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + 
rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + required: + - name + - selector + - template + type: object + type: array + terminate: + type: boolean + required: + - templates + type: object + status: + properties: + analysisRuns: + items: + properties: + analysisRun: + type: string + message: + type: string + name: + type: string + phase: + type: string + required: + - analysisRun + - name + - phase + type: object + type: array + availableAt: + format: date-time + type: string + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + lastUpdateTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - lastUpdateTime + - message + - reason + - status + - type + type: object + type: array + message: + type: string + phase: + type: string + templateStatuses: + items: + properties: + availableReplicas: + format: int32 + type: integer + collisionCount: + format: int32 + type: integer + lastTransitionTime: + format: date-time + type: string + message: + type: string + name: + type: string + readyReplicas: + format: int32 + type: integer + replicas: + format: int32 + type: integer + status: + type: string + updatedReplicas: + format: int32 + type: integer + required: + - availableReplicas + - name + - readyReplicas + - replicas + - updatedReplicas + type: object + type: array + type: object + required: + - spec + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true diff --git a/jvm/src/main/resources/argo/customresourcedefinitions/Gateways.yml b/jvm/src/main/resources/argo/customresourcedefinitions/Gateways.yml new file mode 100644 index 0000000..ee735fb --- /dev/null +++ b/jvm/src/main/resources/argo/customresourcedefinitions/Gateways.yml @@ -0,0 +1,16 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: gateways.argoproj.io +spec: + group: argoproj.io + names: + kind: Gateway + listKind: GatewayList + plural: gateways + singular: gateway + shortNames: + - gw + scope: Namespaced + version: "v1alpha1" \ No newline at end of file diff --git a/jvm/src/main/resources/argo/customresourcedefinitions/Rollouts.yml 
b/jvm/src/main/resources/argo/customresourcedefinitions/Rollouts.yml new file mode 100644 index 0000000..f3b24f1 --- /dev/null +++ b/jvm/src/main/resources/argo/customresourcedefinitions/Rollouts.yml @@ -0,0 +1,2802 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: rollouts.argoproj.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.replicas + description: Number of desired pods + name: Desired + type: integer + - JSONPath: .status.replicas + description: Total number of non-terminated pods targeted by this rollout + name: Current + type: integer + - JSONPath: .status.updatedReplicas + description: Total number of non-terminated pods targeted by this rollout that + have the desired template spec + name: Up-to-date + type: integer + - JSONPath: .status.availableReplicas + description: Total number of available pods (ready for at least minReadySeconds) + targeted by this rollout + name: Available + type: integer + group: argoproj.io + names: + kind: Rollout + listKind: RolloutList + plural: rollouts + shortNames: + - ro + singular: rollout + scope: Namespaced + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.HPAReplicas + validation: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + minReadySeconds: + format: int32 + type: integer + paused: + type: boolean + progressDeadlineSeconds: + format: int32 + type: integer + replicas: + format: int32 + type: integer + revisionHistoryLimit: + format: int32 + type: integer + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + strategy: + properties: + blueGreen: + properties: + activeService: + type: string + autoPromotionEnabled: + type: boolean + autoPromotionSeconds: + format: int32 + type: integer + previewReplicaCount: + format: int32 + type: integer + previewService: + type: string + scaleDownDelayRevisionLimit: + format: int32 + type: integer + scaleDownDelaySeconds: + format: int32 + type: integer + required: + - activeService + type: object + canary: + properties: + analysis: + properties: + args: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + podTemplateHashValue: + type: string + type: object + required: + - name + type: object + type: array + startingStep: + format: int32 + type: integer + templateName: + type: string + required: + - templateName + type: object + canaryService: + type: string + maxSurge: + anyOf: + - type: string + - type: integer + maxUnavailable: + anyOf: + - type: string + - type: integer + stableService: + type: string + steps: + items: + properties: + analysis: + properties: + args: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + podTemplateHashValue: + type: string + type: object + required: + - name + type: object + type: array + templateName: + type: string + required: + - templateName + type: object + experiment: + properties: + analyses: + items: + properties: + args: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + podTemplateHashValue: + type: string + type: object + required: + - name + 
type: object + type: array + name: + type: string + templateName: + type: string + required: + - name + - templateName + type: object + type: array + duration: + type: string + templates: + items: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + name: + type: string + replicas: + format: int32 + type: integer + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + specRef: + type: string + required: + - name + - specRef + type: object + type: array + required: + - templates + type: object + pause: + properties: + duration: + format: int32 + type: integer + type: object + setWeight: + format: int32 + type: integer + type: object + type: array + trafficRouting: + properties: + istio: + properties: + virtualService: + properties: + name: + type: string + routes: + items: + type: string + type: array + required: + - name + - routes + type: object + required: + - virtualService + type: object + type: object + type: object + type: object + template: + properties: + metadata: + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: 
+ matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + type: boolean + containers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + 
type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: 
integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + dnsPolicy: + type: string + enableServiceLinks: + type: boolean + ephemeralContainers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + exec: + 
properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: 
string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + targetContainerName: + type: string + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + hostAliases: + items: + properties: + hostnames: + items: + type: string + type: array + ip: + type: string + type: object + type: array + hostIPC: + type: boolean + hostNetwork: + type: boolean + hostPID: + type: boolean + hostname: + type: string + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + initContainers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + 
exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: 
+ type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + nodeName: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + overhead: + additionalProperties: + type: string + type: object + preemptionPolicy: + type: string + priority: + format: int32 + type: integer + priorityClassName: + type: string + readinessGates: + items: + properties: + conditionType: + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + type: string + runtimeClassName: + type: string + schedulerName: + type: string + securityContext: + properties: + fsGroup: + format: int64 + type: integer + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + serviceAccountName: + type: string + shareProcessNamespace: + type: boolean + subdomain: + type: string + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + maxSkew: + format: int32 + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array 
+ volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeID: + type: string + required: + - volumeID + type: object + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + type: string + type: object + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + 
fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + required: + - selector + - template + type: object + status: + properties: + HPAReplicas: + format: int32 + type: integer + abort: + type: boolean + availableReplicas: + format: int32 + type: integer + blueGreen: + properties: + activeSelector: + type: string + previewSelector: + type: string + previousActiveSelector: + type: string + scaleDownDelayStartTime: + format: date-time + type: string + scaleUpPreviewCheckPoint: + type: boolean + type: object + canary: + properties: + currentBackgroundAnalysisRun: + type: string + currentExperiment: + type: string + currentStepAnalysisRun: + type: string + stableRS: + type: string + type: object + collisionCount: + format: int32 + type: integer + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + lastUpdateTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - lastUpdateTime + - message + - reason + - status + - type + type: object + type: array + controllerPause: + type: boolean + currentPodHash: + type: string + currentStepHash: + type: string + currentStepIndex: + format: int32 + type: integer + observedGeneration: + type: string + pauseConditions: + items: + properties: + reason: + type: string + startTime: + format: date-time + type: string + required: + - reason + - startTime + type: object + type: array + readyReplicas: + 
format: int32 + type: integer + replicas: + format: int32 + type: integer + selector: + type: string + updatedReplicas: + format: int32 + type: integer + type: object + required: + - spec + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true diff --git a/jvm/src/main/resources/argo/customresourcedefinitions/Sensors.yml b/jvm/src/main/resources/argo/customresourcedefinitions/Sensors.yml new file mode 100644 index 0000000..6ecbff2 --- /dev/null +++ b/jvm/src/main/resources/argo/customresourcedefinitions/Sensors.yml @@ -0,0 +1,16 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: sensors.argoproj.io +spec: + group: argoproj.io + names: + kind: Sensor + listKind: SensorList + plural: sensors + singular: sensor + shortNames: + - sn + scope: Namespaced + version: "v1alpha1" \ No newline at end of file diff --git a/jvm/src/main/resources/argo/customresourcedefinitions/WorkflowTemplates.yml b/jvm/src/main/resources/argo/customresourcedefinitions/WorkflowTemplates.yml new file mode 100644 index 0000000..76e78bf --- /dev/null +++ b/jvm/src/main/resources/argo/customresourcedefinitions/WorkflowTemplates.yml @@ -0,0 +1,15 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: workflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowTemplate + plural: workflowtemplates + singular: workflowtemplate + shortNames: + - wftmpl + scope: Namespaced + version: v1alpha1 \ No newline at end of file diff --git a/jvm/src/main/resources/argo/customresourcedefinitions/Workflows.yml b/jvm/src/main/resources/argo/customresourcedefinitions/Workflows.yml new file mode 100644 index 0000000..1a12072 --- /dev/null +++ b/jvm/src/main/resources/argo/customresourcedefinitions/Workflows.yml @@ -0,0 +1,25 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: workflows.argoproj.io +spec: + additionalPrinterColumns: + - JSONPath: .status.phase + description: Status of the workflow + name: Status + type: string + - JSONPath: .status.startedAt + description: When the workflow was started + format: date-time + name: Age + type: date + group: argoproj.io + names: + kind: Workflow + plural: workflows + singular: workflow + shortNames: + - wf + scope: Namespaced + version: v1alpha1 \ No newline at end of file diff --git a/jvm/src/main/resources/argo/deployments/ArgoRollouts.yml b/jvm/src/main/resources/argo/deployments/ArgoRollouts.yml new file mode 100644 index 0000000..9761eda --- /dev/null +++ b/jvm/src/main/resources/argo/deployments/ArgoRollouts.yml @@ -0,0 +1,33 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: rollouts-controller + app.kubernetes.io/name: argo-rollouts + app.kubernetes.io/part-of: argo-rollouts + name: argo-rollouts +spec: + selector: + matchLabels: + app.kubernetes.io/name: argo-rollouts + strategy: + type: Recreate + template: + metadata: + labels: + app.kubernetes.io/name: argo-rollouts + spec: + containers: + - command: + - /bin/rollouts-controller + image: argoproj/argo-rollouts:v0.7.2 + imagePullPolicy: Always + name: argo-rollouts + volumeMounts: + - mountPath: /tmp + name: tmp + serviceAccountName: argo-rollouts + volumes: + - emptyDir: {} + name: tmp \ No newline at end of file diff --git a/jvm/src/main/resources/argo/deployments/ArgoServer.yml b/jvm/src/main/resources/argo/deployments/ArgoServer.yml new file mode 100644 index 0000000..ba4ad0c --- 
/dev/null +++ b/jvm/src/main/resources/argo/deployments/ArgoServer.yml @@ -0,0 +1,29 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: argo-server +spec: + selector: + matchLabels: + app: argo-server + template: + metadata: + labels: + app: argo-server + spec: + containers: + - args: + - server + image: argoproj/argocli:v2.6.3 + name: argo-server + ports: + - containerPort: 2746 + readinessProbe: + httpGet: + path: / + port: 2746 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 20 + serviceAccountName: argo-server \ No newline at end of file diff --git a/jvm/src/main/resources/argo/deployments/GatewayController.yml b/jvm/src/main/resources/argo/deployments/GatewayController.yml new file mode 100644 index 0000000..d83d817 --- /dev/null +++ b/jvm/src/main/resources/argo/deployments/GatewayController.yml @@ -0,0 +1,29 @@ +--- +# The gateway-controller listens for changes on the gateway CRD and creates gateway +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gateway-controller + namespace: argo +spec: + replicas: 1 + selector: + matchLabels: + app: gateway-controller + template: + metadata: + labels: + app: gateway-controller + spec: + serviceAccountName: argo-events-sa + containers: + - name: gateway-controller + image: argoproj/gateway-controller:v0.13.0 + imagePullPolicy: Always + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONTROLLER_CONFIG_MAP + value: gateway-controller-configmap \ No newline at end of file diff --git a/jvm/src/main/resources/argo/deployments/SensorController.yml b/jvm/src/main/resources/argo/deployments/SensorController.yml new file mode 100644 index 0000000..11f665e --- /dev/null +++ b/jvm/src/main/resources/argo/deployments/SensorController.yml @@ -0,0 +1,28 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sensor-controller + namespace: argo +spec: + replicas: 1 + selector: + matchLabels: + app: sensor-controller + template: + metadata: + labels: + app: sensor-controller + spec: + serviceAccountName: argo-events-sa + containers: + - name: sensor-controller + image: argoproj/sensor-controller:v0.13.0 + imagePullPolicy: Always + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONTROLLER_CONFIG_MAP + value: sensor-controller-configmap \ No newline at end of file diff --git a/jvm/src/main/resources/argo/deployments/WorkflowController.yml b/jvm/src/main/resources/argo/deployments/WorkflowController.yml new file mode 100644 index 0000000..6679602 --- /dev/null +++ b/jvm/src/main/resources/argo/deployments/WorkflowController.yml @@ -0,0 +1,25 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: workflow-controller +spec: + selector: + matchLabels: + app: workflow-controller + template: + metadata: + labels: + app: workflow-controller + spec: + containers: + - args: + - --configmap + - workflow-controller-configmap + - --executor-image + - argoproj/argoexec:v2.6.3 + command: + - workflow-controller + image: argoproj/workflow-controller:v2.6.3 + name: workflow-controller + serviceAccountName: argo \ No newline at end of file diff --git a/jvm/src/main/resources/argo/namespaces/Argo.yml b/jvm/src/main/resources/argo/namespaces/Argo.yml new file mode 100644 index 0000000..fb709d2 --- /dev/null +++ b/jvm/src/main/resources/argo/namespaces/Argo.yml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: argo \ No newline at end of file diff --git a/jvm/src/main/resources/argo/rolebindings/Argo.yml 
b/jvm/src/main/resources/argo/rolebindings/Argo.yml new file mode 100644 index 0000000..1251a12 --- /dev/null +++ b/jvm/src/main/resources/argo/rolebindings/Argo.yml @@ -0,0 +1,12 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: argo-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argo-role +subjects: + - kind: ServiceAccount + name: argo \ No newline at end of file diff --git a/jvm/src/main/resources/argo/rolebindings/ArgoEvents.yml b/jvm/src/main/resources/argo/rolebindings/ArgoEvents.yml new file mode 100644 index 0000000..2124608 --- /dev/null +++ b/jvm/src/main/resources/argo/rolebindings/ArgoEvents.yml @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: argo-events-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argo-events-role +subjects: + - kind: ServiceAccount + name: argo-events-sa + namespace: argo \ No newline at end of file diff --git a/jvm/src/main/resources/argo/rolebindings/ArgoRollouts.yml b/jvm/src/main/resources/argo/rolebindings/ArgoRollouts.yml new file mode 100644 index 0000000..168371c --- /dev/null +++ b/jvm/src/main/resources/argo/rolebindings/ArgoRollouts.yml @@ -0,0 +1,16 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: rollouts-controller + app.kubernetes.io/name: argo-rollouts-role-binding + app.kubernetes.io/part-of: argo-rollouts + name: argo-rollouts-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argo-rollouts-role +subjects: + - kind: ServiceAccount + name: argo-rollouts \ No newline at end of file diff --git a/jvm/src/main/resources/argo/roles/Argo.yml b/jvm/src/main/resources/argo/roles/Argo.yml new file mode 100644 index 0000000..176b57a --- /dev/null +++ b/jvm/src/main/resources/argo/roles/Argo.yml @@ -0,0 +1,12 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: argo-role +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get \ No newline at end of file diff --git a/jvm/src/main/resources/argo/roles/ArgoEvents.yml b/jvm/src/main/resources/argo/roles/ArgoEvents.yml new file mode 100644 index 0000000..c7fe7aa --- /dev/null +++ b/jvm/src/main/resources/argo/roles/ArgoEvents.yml @@ -0,0 +1,70 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: argo-events-role +rules: + - apiGroups: + - argoproj.io + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + resources: + - workflows + - workflows/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - gateways + - gateways/finalizers + - sensors + - sensors/finalizers + - eventsources + - eventsources/finalizers + - apiGroups: + - "" + resources: + - pods + - pods/exec + - configmaps + - secrets + - services + - events + - persistentvolumeclaims + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "batch" + resources: + - jobs + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "apps" + resources: + - deployments + verbs: + - create + - get + - list + - watch + - update + - patch + - delete \ No newline at end of file diff --git a/jvm/src/main/resources/argo/roles/ArgoRollouts.yml b/jvm/src/main/resources/argo/roles/ArgoRollouts.yml new file mode 100644 index 0000000..2faef54 --- /dev/null +++ 
b/jvm/src/main/resources/argo/roles/ArgoRollouts.yml @@ -0,0 +1,88 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: rollouts-controller + app.kubernetes.io/name: argo-rollouts-role + app.kubernetes.io/part-of: argo-rollouts + name: argo-rollouts-role +rules: + - apiGroups: + - apps + resources: + - replicasets + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - patch + - apiGroups: + - argoproj.io + resources: + - rollouts + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - argoproj.io + resources: + - analysisruns + - experiments + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - argoproj.io + resources: + - analysistemplates + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - update + - patch + - apiGroups: + - "" + resources: + - secrets + verbs: + - get \ No newline at end of file diff --git a/jvm/src/main/resources/argo/serviceaccounts/Argo.yml b/jvm/src/main/resources/argo/serviceaccounts/Argo.yml new file mode 100644 index 0000000..8dcb36b --- /dev/null +++ b/jvm/src/main/resources/argo/serviceaccounts/Argo.yml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo \ No newline at end of file diff --git a/jvm/src/main/resources/argo/serviceaccounts/ArgoEvents.yml b/jvm/src/main/resources/argo/serviceaccounts/ArgoEvents.yml new file mode 100644 index 0000000..ec829e5 --- /dev/null +++ b/jvm/src/main/resources/argo/serviceaccounts/ArgoEvents.yml @@ -0,0 +1,7 @@ +# All argo-events services are bound to the "argo-events" service account. +# In RBAC enabled setups, this SA is bound to specific roles. 
+apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo-events-sa + namespace: argo \ No newline at end of file diff --git a/jvm/src/main/resources/argo/serviceaccounts/ArgoRollouts.yml b/jvm/src/main/resources/argo/serviceaccounts/ArgoRollouts.yml new file mode 100644 index 0000000..8b8312a --- /dev/null +++ b/jvm/src/main/resources/argo/serviceaccounts/ArgoRollouts.yml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: rollouts-controller + app.kubernetes.io/name: argo-rollouts + app.kubernetes.io/part-of: argo-rollouts + name: argo-rollouts \ No newline at end of file diff --git a/jvm/src/main/resources/argo/serviceaccounts/ArgoServer.yml b/jvm/src/main/resources/argo/serviceaccounts/ArgoServer.yml new file mode 100644 index 0000000..781de15 --- /dev/null +++ b/jvm/src/main/resources/argo/serviceaccounts/ArgoServer.yml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo-server \ No newline at end of file diff --git a/jvm/src/main/resources/argo/services/ArgoRollouts.yml b/jvm/src/main/resources/argo/services/ArgoRollouts.yml new file mode 100644 index 0000000..c136638 --- /dev/null +++ b/jvm/src/main/resources/argo/services/ArgoRollouts.yml @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: argo-rollouts-metrics + app.kubernetes.io/part-of: argo-rollouts + name: argo-rollouts-metrics +spec: + ports: + - name: metrics + port: 8090 + protocol: TCP + targetPort: 8090 + selector: + app.kubernetes.io/name: argo-rollouts \ No newline at end of file diff --git a/jvm/src/main/resources/argo/services/ArgoServer.yml b/jvm/src/main/resources/argo/services/ArgoServer.yml new file mode 100644 index 0000000..619c830 --- /dev/null +++ b/jvm/src/main/resources/argo/services/ArgoServer.yml @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: argo-server +spec: + ports: + - port: 2746 + targetPort: 2746 + selector: + app: argo-server \ No newline at end of file diff --git a/jvm/src/main/resources/infinispan-kubernetes.xml b/jvm/src/main/resources/infinispan-kubernetes.xml new file mode 100644 index 0000000..c1ff0f6 --- /dev/null +++ b/jvm/src/main/resources/infinispan-kubernetes.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + \ No newline at end of file diff --git a/jvm/src/main/resources/infinispan.xml b/jvm/src/main/resources/infinispan.xml new file mode 100644 index 0000000..8c8f9ff --- /dev/null +++ b/jvm/src/main/resources/infinispan.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + diff --git a/jvm/src/main/resources/log4j2.yml b/jvm/src/main/resources/log4j2.yml new file mode 100644 index 0000000..382c582 --- /dev/null +++ b/jvm/src/main/resources/log4j2.yml @@ -0,0 +1,37 @@ +Configutation: + name: Default + Properties: + Property: + name: log-path + value: "logs" + Appenders: + Console: + name: ConsoleAppender + target: SYSTEM_OUT + PatternLayout: + pattern: "%highlight{%d{HH:mm:ss} | %c:%line |} %style{%msg%n%throwable}{white}" + File: + name: FileAppender + fileName: ${log-path}/logfile.log + PatternLayout: + pattern: "[%-5level] %d{yyyy-MM-dd HH:mm:ss} %c - %msg%n" + Loggers: + Root: + level: debug + AppenderRef: + - ref: ConsoleAppender + Logger: + - name: com.harana.modules.okhttp + level: error + - name: io.netty + level: error + - name: io.vertx + level: info + - name: org.redisson + level: warn + - name: org.mongodb + level: warn + - name: skuber.api + level: warn + - name: org.apache.commons.vfs2 
+ level: error diff --git a/jvm/src/main/resources/pack/linux/pack b/jvm/src/main/resources/pack/linux/pack new file mode 100644 index 0000000..e69de29 diff --git a/jvm/src/main/resources/pack/mac/pack b/jvm/src/main/resources/pack/mac/pack new file mode 100644 index 0000000..e69de29 diff --git a/jvm/src/main/resources/pack/windows/pack.exe b/jvm/src/main/resources/pack/windows/pack.exe new file mode 100644 index 0000000..e69de29 diff --git a/jvm/src/main/resources/public/templates/temp.hbs b/jvm/src/main/resources/public/templates/temp.hbs new file mode 100644 index 0000000..cffa2ef --- /dev/null +++ b/jvm/src/main/resources/public/templates/temp.hbs @@ -0,0 +1 @@ +sadf \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/models/PluginInfo.scala b/jvm/src/main/scala/com/harana/models/PluginInfo.scala new file mode 100644 index 0000000..03a6f4d --- /dev/null +++ b/jvm/src/main/scala/com/harana/models/PluginInfo.scala @@ -0,0 +1,5 @@ +package com.harana.models + +case class PluginInfo(name: String, + vendor: String, + version: Long) diff --git a/jvm/src/main/scala/com/harana/models/PluginServiceInfo.scala b/jvm/src/main/scala/com/harana/models/PluginServiceInfo.scala new file mode 100644 index 0000000..0ee9da0 --- /dev/null +++ b/jvm/src/main/scala/com/harana/models/PluginServiceInfo.scala @@ -0,0 +1,3 @@ +package com.harana.models + +case class PluginServiceInfo() diff --git a/jvm/src/main/scala/com/harana/modules/Layers.scala b/jvm/src/main/scala/com/harana/modules/Layers.scala new file mode 100644 index 0000000..22f549d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/Layers.scala @@ -0,0 +1,78 @@ +package com.harana.modules + +import com.harana.modules.airbyte.LiveAirbyte +import com.harana.modules.airtable.LiveAirtable +import com.harana.modules.alluxiofs.LiveAlluxioFs +import com.harana.modules.argo.LiveArgo +import com.harana.modules.aws.LiveAWS +import com.harana.modules.aws_s3.LiveAwsS3 +import com.harana.modules.clearbit.LiveClearbit +import com.harana.modules.core.http.LiveHttp +import com.harana.modules.core.{Layers => CoreLayers} +import com.harana.modules.docker.LiveDocker +import com.harana.modules.email.LiveEmail +import com.harana.modules.file.LiveFile +import com.harana.modules.handlebars.LiveHandlebars +import com.harana.modules.kubernetes.LiveKubernetes +import com.harana.modules.mixpanel.LiveMixpanel +import com.harana.modules.ohc.LiveOHC +import com.harana.modules.shopify.LiveShopify +import com.harana.modules.stripe._ +import com.harana.modules.thumbnailator.LiveThumbnailator +import com.harana.modules.vertx.LiveVertx +import com.harana.modules.zendesk.LiveZendesk +import zio.Clock + +object Layers { + + lazy val http = CoreLayers.standard >>> LiveHttp.layer + + lazy val airtable = CoreLayers.standard >>> LiveAirtable.layer + lazy val alluxioFs = CoreLayers.standard >>> LiveAlluxioFs.layer + lazy val aws = CoreLayers.standard >>> LiveAWS.layer + lazy val awsS3 = CoreLayers.standard >>> LiveAwsS3.layer + lazy val clearbit = (CoreLayers.standard ++ http) >>> LiveClearbit.layer + lazy val docker = CoreLayers.standard ++ http >>> LiveDocker.layer + lazy val email = CoreLayers.standard >>> LiveEmail.layer + lazy val file = CoreLayers.standard >>> LiveFile.layer + lazy val handlebars = CoreLayers.standard >>> LiveHandlebars.layer + lazy val kubernetes = CoreLayers.standard >>> LiveKubernetes.layer + lazy val mixpanel = CoreLayers.standard >>> LiveMixpanel.layer + lazy val ohc = LiveOHC.layer + lazy val shopify = CoreLayers.standard ++ http >>> 
LiveShopify.layer + lazy val thumbnailator = LiveThumbnailator.layer + lazy val vertx = CoreLayers.standard >>> LiveVertx.layer + lazy val zendesk = CoreLayers.standard >>> LiveZendesk.layer + + lazy val airbyte = (CoreLayers.standard ++ kubernetes) >>> LiveAirbyte.layer + lazy val argo = (CoreLayers.standard ++ kubernetes) >>> LiveArgo.layer + + lazy val stripeAccounts = CoreLayers.standard >>> LiveStripeAccounts.layer + lazy val stripeApplicationFeeRefunds = CoreLayers.standard >>> LiveStripeApplicationFeeRefunds.layer + lazy val stripeApplicationFees = CoreLayers.standard >>> LiveStripeApplicationFees.layer + lazy val stripeBalance = CoreLayers.standard >>> LiveStripeBalance.layer + lazy val stripeCharges = CoreLayers.standard >>> LiveStripeCharges.layer + lazy val stripeCountrySpecs = CoreLayers.standard >>> LiveStripeCountrySpecs.layer + lazy val stripeCoupons = CoreLayers.standard >>> LiveStripeCoupons.layer + lazy val stripeCustomerBankAccount = CoreLayers.standard >>> LiveStripeCustomerBankAccounts.layer + lazy val stripeCustomerCreditCards = CoreLayers.standard >>> LiveStripeCustomerCreditCards.layer + lazy val stripeCustomers = CoreLayers.standard >>> LiveStripeCustomers.layer + lazy val stripeDiscounts = CoreLayers.standard >>> LiveStripeDiscounts.layer + lazy val stripeDisputes = CoreLayers.standard >>> LiveStripeDisputes.layer + lazy val stripeEvents = CoreLayers.standard >>> LiveStripeEvents.layer + lazy val stripeExternalBankAccounts = CoreLayers.standard >>> LiveStripeExternalBankAccounts.layer + lazy val stripeExternalCreditCards = CoreLayers.standard >>> LiveStripeExternalCreditCards.layer + lazy val stripeInvoiceItems = CoreLayers.standard >>> LiveStripeInvoiceItems .layer + lazy val stripeInvoices = CoreLayers.standard >>> LiveStripeInvoices.layer + lazy val stripePlans = CoreLayers.standard >>> LiveStripePlans.layer + lazy val stripePrices = CoreLayers.standard >>> LiveStripePrices.layer + lazy val stripeProducts = CoreLayers.standard >>> LiveStripeProducts.layer + lazy val stripeRefunds = CoreLayers.standard >>> LiveStripeRefunds.layer + lazy val stripeSubscriptionItems = CoreLayers.standard >>> LiveStripeSubscriptionItems.layer + lazy val stripeSubscriptions = CoreLayers.standard >>> LiveStripeSubscriptions.layer + lazy val stripeTokens = CoreLayers.standard >>> LiveStripeTokens.layer + lazy val stripeTransferReversals = CoreLayers.standard >>> LiveStripeTransferReversals.layer + lazy val stripeTransfers = CoreLayers.standard >>> LiveStripeTransfers.layer + lazy val stripeUI = CoreLayers.standard ++ http >>> LiveStripeUI.layer + +} diff --git a/jvm/src/main/scala/com/harana/modules/airbyte/Airbyte.scala b/jvm/src/main/scala/com/harana/modules/airbyte/Airbyte.scala new file mode 100644 index 0000000..833c855 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/airbyte/Airbyte.scala @@ -0,0 +1,18 @@ +package com.harana.modules.airbyte + +import io.airbyte.protocol.models.{AirbyteCatalog, AirbyteConnectionStatus, ConfiguredAirbyteCatalog} +import zio.macros.accessible +import zio.{Task, UIO} + +@accessible +trait Airbyte { + + def integrations: Task[List[AirbyteIntegration]] + + def discover(integrationName: String, connectionValues: Map[String, Object]): Task[AirbyteCatalog] + + def check(integrationName: String, connectionValues: Map[String, Object]): Task[AirbyteConnectionStatus] + + def read(integrationName: String, catalog: ConfiguredAirbyteCatalog): Task[Unit] + +} \ No newline at end of file diff --git 
a/jvm/src/main/scala/com/harana/modules/airbyte/AirbyteIntegration.scala b/jvm/src/main/scala/com/harana/modules/airbyte/AirbyteIntegration.scala new file mode 100644 index 0000000..36db90f --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/airbyte/AirbyteIntegration.scala @@ -0,0 +1,64 @@ +package com.harana.modules.airbyte + +import enumeratum.values.{StringCirceEnum, StringEnum, StringEnumEntry} + +import java.lang.{String => JString} + +sealed trait AirbyteOption +object AirbyteOption { + case class Boolean(b: Boolean) extends AirbyteOption + case class Integer(i: Int) extends AirbyteOption + case class Object(title: JString, + description: Option[JString], + properties: List[AirbyteProperty]) extends AirbyteOption + case class String(s: JString) extends AirbyteOption +} + +sealed abstract class AirbyteSyncDirection(val value: String) extends StringEnumEntry +object AirbyteSyncDirection extends StringEnum[AirbyteSyncDirection] with StringCirceEnum[AirbyteSyncDirection] { + case object Source extends AirbyteSyncDirection("source") + case object Destination extends AirbyteSyncDirection("destination") + val values = findValues +} + +sealed abstract class AirbyteSyncMode(val value: String) extends StringEnumEntry +object AirbyteSyncMode extends StringEnum[AirbyteSyncMode] with StringCirceEnum[AirbyteSyncMode] { + case object Append extends AirbyteSyncMode("append") + case object AppendDeduplicate extends AirbyteSyncMode("append_dedup") + case object Overwrite extends AirbyteSyncMode("ovewrite") + val values = findValues +} + +sealed abstract class AirbytePropertyType(val value: String) extends StringEnumEntry +object AirbytePropertyType extends StringEnum[AirbytePropertyType] with StringCirceEnum[AirbytePropertyType] { + case object Boolean extends AirbytePropertyType("boolean") + case object Integer extends AirbytePropertyType("integer") + case object List extends AirbytePropertyType("") + case object Object extends AirbytePropertyType("object") + case object String extends AirbytePropertyType("string") + case object Array extends AirbytePropertyType("array") + val values = findValues +} + +case class AirbyteProperty(name: String, + `type`: AirbytePropertyType, + title: Option[String] = None, + description: Option[String] = None, + placeholder: Option[String] = None, + required: Boolean = false, + validationPattern: Option[String] = None, + default: Option[Either[String, Int]] = None, + options: List[AirbyteOption] = List(), + multiline: Boolean = false, + minimum: Option[Int] = None, + maximum: Option[Int] = None, + order: Option[Int] = None, + secret: Boolean = false) + +case class AirbyteIntegration(name: String, + properties: List[AirbyteProperty], + syncDirection: AirbyteSyncDirection, + supportsDBT: Boolean, + supportsIncremental: Boolean, + supportsNormalization: Boolean, + supportedSyncModes: List[AirbyteSyncMode]) diff --git a/jvm/src/main/scala/com/harana/modules/airbyte/LiveAirbyte.scala b/jvm/src/main/scala/com/harana/modules/airbyte/LiveAirbyte.scala new file mode 100644 index 0000000..9810b64 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/airbyte/LiveAirbyte.scala @@ -0,0 +1,99 @@ +package com.harana.modules.airbyte + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.kubernetes.Kubernetes +import io.airbyte.protocol.models.{AirbyteCatalog, AirbyteConnectionStatus, ConfiguredAirbyteCatalog} +import io.circe.parser._ +import skuber._ 
+import zio.{Task, ZIO, ZLayer} + +object LiveAirbyte { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + kubernetes <- ZIO.service[Kubernetes] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveAirbyte(config, kubernetes, logger, micrometer) + } +} + +case class LiveAirbyte(config: Config, kubernetes: Kubernetes, logger: Logger, micrometer: Micrometer) extends Airbyte { + + def integrations: Task[List[AirbyteIntegration]] = + for { + files <- ZIO.succeed(airbyteFiles) + jsons = files.view.mapValues(parse).filter(_._2.isRight).mapValues(_.toOption.get) + integrations = jsons.map { j => toAirbyteIntegration(j._1, j._2)}.toList + } yield integrations + + + def check(integrationName: String, connectionValues: Map[String, Object]): Task[AirbyteConnectionStatus] = + null + + def discover(integrationName: String, connectionValues: Map[String, Object]): Task[AirbyteCatalog] = + null + + + def read(integrationName: String, catalog: ConfiguredAirbyteCatalog): Task[Unit] = + null + + + private def run(integrationName: String, + prefix: String, + namespace: String, + s3StorageClassName: String, + s3Endpoint: String, + s3Bucket: String, + s3Path: String, + s3AccessKeyId: String, + s3SecretAccessKey: String) = + for { + client <- kubernetes.newClient + + secret = Secret(metadata = ObjectMeta(name = s"$prefix-secret", namespace = namespace), + data = Map("accessKeyID" -> s3AccessKeyId.getBytes, "secretAccessKey" -> s3SecretAccessKey.getBytes, "endpoint" -> s3Endpoint.getBytes) + ) + +// pv = PersistentVolume(metadata = ObjectMeta(name = s"$prefix-volume", namespace = namespace), +// spec = Some(PersistentVolume.Spec( +// accessModes = List(AccessMode.ReadWriteMany), +// capacity = Map(Resource.storage -> Quantity("10Gi")), +// claimRef = Some(ObjectReference(name = s"$prefix-pvc", namespace = namespace)), +// storageClassName = Some(ObjectReference(name = "csi-s3")), +// storageClassName = Some("csi-s3"), +// source = GenericVolumeSource( +// Map( +// "csi" -> Map( +// "driver" -> s"ru.yandex.s3.csi".asJson, +// "controllerPublishSecretRef" -> Map("name" -> "csi-s3-secret", "namespace" -> namespace).asJson, +// "nodePublishSecretRef" -> Map("name" -> "csi-s3-secret", "namespace" -> namespace).asJson, +// "nodeStageSecretRef" -> Map("name" -> "csi-s3-secret", "namespace" -> namespace).asJson, +// "volumeAttributes" -> Map("capacity" -> "10Gi", "mounter" -> "geesfs").asJson, +// "volumeHandle" -> s"$s3Bucket/$s3Path".asJson +// ).asJson +// ).asJson.noSpaces +// ) +// )) +// ) +// +// pvc = PersistentVolumeClaim(metadata = ObjectMeta(name = s"$prefix-volume", namespace = namespace), +// spec = Some(PersistentVolumeClaim.Spec( +// accessModes = List(AccessMode.ReadWriteMany), +// resources = Some(Resource.Requirements(requests = Map(Resource.storage -> Quantity("10Gi")))) +// )) +// ) +// +// volumeMount = Volume.Mount(name = "user-home", mountPath = "/home/harana", subPath = claims.userId) +// containerSpec = Container(name = name, image = app.image, imagePullPolicy = Some(Container.PullPolicy.Always), volumeMounts = List(volumeMount)).exposePort(app.httpPort) +// volume = Volume(name = "user-home", Volume.PersistentVolumeClaimRef("user-home")) +// +// podSpec = Pod.Spec(imagePullSecrets = List(LocalObjectReference("aws-registry"))) +// pod = Pod(name = podName, spec = podSpec) +// _ <- kubernetes.create(client, namespace, pod).ignore +// _ <- kubernetes.create(client, podNamespace, pod).ignore + + } yield () +} \ No newline at end of file diff --git 
a/jvm/src/main/scala/com/harana/modules/airbyte/package.scala b/jvm/src/main/scala/com/harana/modules/airbyte/package.scala new file mode 100644 index 0000000..1b7726c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/airbyte/package.scala @@ -0,0 +1,170 @@ +package com.harana.modules + +import io.airbyte.api.client.model.generated._ +import io.airbyte.commons.enums.Enums +import io.airbyte.commons.text.Names +import io.airbyte.protocol.models.{ConfiguredAirbyteCatalog, ConfiguredAirbyteStream, AirbyteCatalog => ProtocolAirbyteCatalog, AirbyteStream => ProtocolAirbyteStream, DestinationSyncMode => ProtocolDestinationSyncMode, SyncMode => ProtocolSyncMode} +import io.circe.{ACursor, HCursor, Json} +import org.apache.commons.io.IOUtils + +import java.io.File +import java.nio.charset.StandardCharsets +import java.util.jar.JarFile +import scala.jdk.CollectionConverters._ + +package object airbyte { + + def toAirbyteIntegration(fileName: String, json: Json): AirbyteIntegration = { + val connectionSpec = json.hcursor.downField("connectionSpecification") + val required = connectionSpec.downField("required").as[List[String]].getOrElse(List()) + + val properties = + connectionSpec.downField("properties").keys.getOrElse(List()).map { p => + toAirbyteProperty(p, connectionSpec.downField("properties").downField(p), required) + } + + val name = fileName.substring(fileName.indexOf("-") + 1, fileName.length) + val syncDirection = if (fileName.contains("source")) AirbyteSyncDirection.Source else AirbyteSyncDirection.Destination + val supportsDBT = json.hcursor.downField("supportsDBT").as[Boolean].getOrElse(false) + val supportsIncremental = json.hcursor.downField("supportsIncremental").as[Boolean].getOrElse(false) + val supportsNormalization = json.hcursor.downField("supportsNormalization").as[Boolean].getOrElse(false) + + AirbyteIntegration(name, properties.toList, syncDirection, supportsDBT, supportsIncremental, supportsNormalization, List()) + } + + def toAirbyteProperty(name: String, cursor: ACursor, required: List[String]): AirbyteProperty = { + val `type` = AirbytePropertyType.withValue(cursor.downField("type").as[String].getOrElse("")) + + AirbyteProperty( + name = name, + `type` = `type`, + title = cursor.downField("title").as[String].toOption, + description = cursor.downField("description").as[String].toOption, + placeholder = cursor.downField("examples").as[List[String]].getOrElse(List()).headOption, + required = required.contains(name), + validationPattern = cursor.downField("pattern").as[String].toOption, + multiline = cursor.downField("multiline").as[Boolean].toOption.getOrElse(false), + minimum = cursor.downField("minimum").as[Int].toOption, + maximum = cursor.downField("maximum").as[Int].toOption, + order = cursor.downField("order").as[Int].toOption, + secret = cursor.downField("airbyte_secret").as[Boolean].toOption.getOrElse(false), + options = `type` match { + case AirbytePropertyType.Integer => + cursor.downField("enum").as[List[Int]].getOrElse(List()).map(AirbyteOption.Integer) + + case AirbytePropertyType.Object => + cursor.downField("oneOf").downArray.values.getOrElse(List()).map { o => + toAirbyteOption(o.hcursor) + }.toList + + case AirbytePropertyType.String => + cursor.downField("enum").as[List[String]].getOrElse(List()).map(AirbyteOption.String) + + case AirbytePropertyType.Array => + List() + + case _ => List() + } + ) + } + + def toAirbyteOption(cursor: HCursor): AirbyteOption.Object = { + val title = cursor.downField("title").as[String].toOption.get + val description = 
cursor.downField("description").as[String].toOption + val required = cursor.downField("required").as[List[String]].getOrElse(List()) + + val properties = cursor.downField("properties").values.getOrElse(List()).map { p => + toAirbyteProperty("", p.hcursor, required) + } + + AirbyteOption.Object(title, description, properties.toList) + } + + implicit def toApi(stream: ProtocolAirbyteStream): AirbyteStream = + new AirbyteStream() + .name(stream.getName) + .jsonSchema(stream.getJsonSchema) + .supportedSyncModes(Enums.convertListTo(stream.getSupportedSyncModes, classOf[SyncMode])) + .sourceDefinedCursor(stream.getSourceDefinedCursor) + .defaultCursorField(stream.getDefaultCursorField) + .sourceDefinedPrimaryKey(stream.getSourceDefinedPrimaryKey) + .namespace(stream.getNamespace) + + + implicit def toProtocol(stream: AirbyteStream): ProtocolAirbyteStream = + new ProtocolAirbyteStream() + .withName(stream.getName) + .withJsonSchema(stream.getJsonSchema) + .withSupportedSyncModes(Enums.convertListTo(stream.getSupportedSyncModes, classOf[ProtocolSyncMode])) + .withSourceDefinedCursor(stream.getSourceDefinedCursor) + .withDefaultCursorField(stream.getDefaultCursorField) + .withSourceDefinedPrimaryKey(stream.getSourceDefinedPrimaryKey) + .withNamespace(stream.getNamespace) + + + implicit def toApi(catalog: ProtocolAirbyteCatalog): AirbyteCatalog = + new AirbyteCatalog() + .streams(catalog.getStreams.asScala.map(toApi) + .map(s => new AirbyteStreamAndConfiguration().stream(s).config(generateDefaultConfiguration(s))).asJava) + + + implicit def toApi(catalog: ConfiguredAirbyteCatalog): AirbyteCatalog = + new AirbyteCatalog().streams(catalog.getStreams.asScala.map(configuredStream => + new AirbyteStreamAndConfiguration() + .stream(toApi(configuredStream.getStream)) + .config(new AirbyteStreamConfiguration() + .syncMode(Enums.convertTo(configuredStream.getSyncMode, classOf[SyncMode])) + .cursorField(configuredStream.getCursorField) + .destinationSyncMode(Enums.convertTo(configuredStream.getDestinationSyncMode, classOf[DestinationSyncMode])) + .primaryKey(configuredStream.getPrimaryKey) + .aliasName(Names.toAlphanumericAndUnderscore(configuredStream.getStream.getName)) + .selected(true) + ) + ).asJava) + + + implicit def toProtocol(catalog: AirbyteCatalog): ConfiguredAirbyteCatalog = + new ConfiguredAirbyteCatalog().withStreams(catalog + .getStreams + .asScala + .filter(_.getConfig.getSelected) + .map(s => new ConfiguredAirbyteStream() + .withStream(toProtocol(s.getStream)) + .withSyncMode(Enums.convertTo(s.getConfig.getSyncMode, classOf[ProtocolSyncMode])) + .withCursorField(s.getConfig.getCursorField) + .withDestinationSyncMode(Enums.convertTo(s.getConfig.getDestinationSyncMode, classOf[ProtocolDestinationSyncMode])) + .withPrimaryKey(s.getConfig.getPrimaryKey) + ) + .asJava) + + + private def generateDefaultConfiguration(stream: AirbyteStream) = { + val result = new AirbyteStreamConfiguration() + .aliasName(Names.toAlphanumericAndUnderscore(stream.getName)) + .cursorField(stream.getDefaultCursorField) + .destinationSyncMode(DestinationSyncMode.APPEND) + .primaryKey(stream.getSourceDefinedPrimaryKey).selected(true) + + if (stream.getSupportedSyncModes.size > 0) result.setSyncMode(stream.getSupportedSyncModes.get(0)) + else result.setSyncMode(SyncMode.INCREMENTAL) + + result + } + + val jarFile = new File(getClass.getProtectionDomain.getCodeSource.getLocation.getPath) + + def airbyteFiles: Map[String, String] = { + val files = if (jarFile.isFile) { + val jar = new JarFile(jarFile) + val jarFiles = 
jar.entries.asScala.filter(_.getName.startsWith("airbyte/")).map(f => s"/${f.getName}") + jarFiles.toList + } else { + val url = classOf[Airbyte].getResource("/airbyte") + new File(url.toURI).listFiles().map(_.getName).toList + } + files.map(f => + f.replace("/", "").replace("airbyte", "").replace(".json", "") -> + IOUtils.toString(classOf[Airbyte].getResourceAsStream(f), StandardCharsets.UTF_8.name) + ).toMap + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/airtable/Airtable.scala b/jvm/src/main/scala/com/harana/modules/airtable/Airtable.scala new file mode 100644 index 0000000..a5eecab --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/airtable/Airtable.scala @@ -0,0 +1,27 @@ +package com.harana.modules.airtable + +import dev.fuxing.airtable.AirtableRecord +import dev.fuxing.airtable.AirtableTable.{PaginationList, QuerySpec} +import zio.Task +import zio.macros.accessible + +import java.util + +@accessible +trait Airtable { + + def tableIterator(base: String, table: String, query: QuerySpec): Task[util.Iterator[AirtableRecord]] + + def listTable(base: String, table: String, query: QuerySpec): Task[PaginationList] + + def getRecord(base: String, table: String, id: String): Task[AirtableRecord] + + def createRecords(base: String, table: String, records: List[AirtableRecord]): Task[Unit] + + def patchRecords(base: String, table: String, records: List[AirtableRecord]): Task[Unit] + + def replaceRecords(base: String, table: String, records: List[AirtableRecord]): Task[Unit] + + def deleteRecords(base: String, table: String, recordIds: List[String]): Task[Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/airtable/LiveAirtable.scala b/jvm/src/main/scala/com/harana/modules/airtable/LiveAirtable.scala new file mode 100644 index 0000000..57006e5 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/airtable/LiveAirtable.scala @@ -0,0 +1,89 @@ +package com.harana.modules.airtable + +import com.harana.modules.airtable.LiveAirtable.clientRef +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import dev.fuxing.airtable.AirtableTable.{PaginationList, QuerySpec} +import dev.fuxing.airtable.{AirtableApi, AirtableRecord} +import zio.{Task, ZIO, ZLayer} + +import java.util +import java.util.concurrent.atomic.AtomicReference +import scala.jdk.CollectionConverters._ + + +object LiveAirtable { + + private val clientRef = new AtomicReference[Option[AirtableApi]](None) + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveAirtable(config, logger, micrometer) + } +} + +case class LiveAirtable(config: Config, logger: Logger, micrometer: Micrometer) extends Airtable { + + private def client = + for { + client <- if (clientRef.get.nonEmpty) ZIO.attempt(clientRef.get.get) else + for { + key <- config.secret("airtable-key") + api <- ZIO.attempt(new AirtableApi(key)) + } yield api + _ = clientRef.set(Some(client)) + } yield client + + + def tableIterator(base: String, table: String, query: QuerySpec): Task[util.Iterator[AirtableRecord]] = + for { + api <- client + iterator = api.base(base).table(table).iterator(query) + } yield iterator + + + def listTable(base: String, table: String, query: QuerySpec): Task[PaginationList] = + for { + api <- client + list <- ZIO.attempt(api.base(base).table(table).list(query)) + } yield list + + + def 
getRecord(base: String, table: String, id: String): Task[AirtableRecord] = + for { + api <- client + record = api.base(base).table(table).get(id) + } yield record + + + def createRecords(base: String, table: String, records: List[AirtableRecord]): Task[Unit] = + for { + api <- client + _ <- ZIO.attempt(api.base(base).table(table).post(records.asJava)) + } yield () + + + def patchRecords(base: String, table: String, records: List[AirtableRecord]): Task[Unit] = + for { + api <- client + _ <- ZIO.attempt(api.base(base).table(table).patch(records.asJava)) + } yield () + + + def replaceRecords(base: String, table: String, records: List[AirtableRecord]): Task[Unit] = + for { + api <- client + _ <- ZIO.attempt(api.base(base).table(table).put(records.asJava)) + } yield () + + + def deleteRecords(base: String, table: String, recordIds: List[String]): Task[Unit] = + for { + api <- client + _ <- ZIO.attempt(api.base(base).table(table).delete(recordIds.asJava)) + } yield () +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/airtable/models/Attachment.scala b/jvm/src/main/scala/com/harana/modules/airtable/models/Attachment.scala new file mode 100644 index 0000000..e400250 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/airtable/models/Attachment.scala @@ -0,0 +1,8 @@ +package com.harana.modules.airtable.models + +case class Attachment(id: String, + url: String, + filename: String, + size: Float, + `type`: String) + //thumbnails: Map[String, Thumbnail]) diff --git a/jvm/src/main/scala/com/harana/modules/airtable/models/Deletion.scala b/jvm/src/main/scala/com/harana/modules/airtable/models/Deletion.scala new file mode 100644 index 0000000..be1d166 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/airtable/models/Deletion.scala @@ -0,0 +1,3 @@ +package com.harana.modules.airtable.models + +case class Deletion(deleted: Boolean, id: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/airtable/models/Error.scala b/jvm/src/main/scala/com/harana/modules/airtable/models/Error.scala new file mode 100644 index 0000000..29121c7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/airtable/models/Error.scala @@ -0,0 +1,3 @@ +package com.harana.modules.airtable.models + +case class Error(`type`: String, message: String) diff --git a/jvm/src/main/scala/com/harana/modules/alertmanager/AlertManager.scala b/jvm/src/main/scala/com/harana/modules/alertmanager/AlertManager.scala new file mode 100644 index 0000000..d98677f --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/alertmanager/AlertManager.scala @@ -0,0 +1,41 @@ +package com.harana.modules.alertmanager + +import zio.Task +import zio.macros.accessible + +@accessible +trait AlertManager { + + def start(name: String, + storageClassName: String, + replicas: Int = 1): Task[Unit] + + def healthy: Task[Boolean] + def ready: Task[Boolean] + def reload: Task[Unit] + + def status: Task[AlertManagerStatus] + + def receivers: Task[List[ReceiverName]] + + def silences(filters: Set[String] = Set()): Task[List[Silence]] + def silence(id: SilenceId): Task[Silence] + def saveSilence(silence: PostableSilence): Task[SilenceId] + def deleteSilence(id: SilenceId): Task[Unit] + + def alerts(active: Boolean = false, + silenced: Boolean = false, + inhibited: Boolean = false, + unprocessed: Boolean = false, + filters: List[String] = List(), + receiver: Option[String] = None): Task[List[Alert]] + + def saveAlerts(alerts: List[PostableAlert]): Task[Unit] + + def alertGroups(active: Boolean = 
false, + silenced: Boolean = false, + inhibited: Boolean = false, + filters: List[String] = List(), + receiver: Option[String] = None): Task[List[AlertGroup]] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/alertmanager/DateTime.java b/jvm/src/main/scala/com/harana/modules/alertmanager/DateTime.java new file mode 100644 index 0000000..7d834e4 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/alertmanager/DateTime.java @@ -0,0 +1,343 @@ +package com.harana.modules.alertmanager; + +import java.io.Serializable; +import java.util.*; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Immutable representation of a date with an optional time and an optional time zone based on RFC 3339. + * + *
<p>
    + * Implementation is immutable and therefore thread-safe. + *
</p>
    + * + * @since 1.0 + * @author Yaniv Inbar + */ +public final class DateTime implements Serializable { + + private static final long serialVersionUID = 1L; + + private static final TimeZone GMT = TimeZone.getTimeZone("GMT"); + + /** Regular expression for parsing RFC3339 date/times. */ + private static final Pattern RFC3339_PATTERN = Pattern.compile( + "^(\\d{4})-(\\d{2})-(\\d{2})" // yyyy-MM-dd + + "([Tt](\\d{2}):(\\d{2}):(\\d{2})(\\.\\d+)?)?" // 'T'HH:mm:ss.milliseconds + + "([Zz]|([+-])(\\d{2}):(\\d{2}))?"); // 'Z' or time zone shift HH:mm following '+' or '-' + + /** + * Date/time value expressed as the number of ms since the Unix epoch. + * + *
<p>
    + * If the time zone is specified, this value is normalized to UTC, so to format this date/time + * value, the time zone shift has to be applied. + *
</p>
    + */ + private final long value; + + /** Specifies whether this is a date-only value. */ + private final boolean dateOnly; + + /** Time zone shift from UTC in minutes or {@code 0} for date-only value. */ + private final int tzShift; + + /** + * Instantiates {@link DateTime} from a {@link Date} and {@link TimeZone}. + * + * @param date date and time + * @param zone time zone; if {@code null}, it is interpreted as {@code TimeZone.getDefault()}. + */ + public DateTime(Date date, TimeZone zone) { + this(false, date.getTime(), zone == null ? null : zone.getOffset(date.getTime()) / 60000); +} + + /** + * Instantiates {@link DateTime} from the number of milliseconds since the Unix epoch. + * + *
<p>
    + * The time zone is interpreted as {@code TimeZone.getDefault()}, which may vary with + * implementation. + *
</p>
    + * + * @param value number of milliseconds since the Unix epoch (January 1, 1970, 00:00:00 GMT) + */ + public DateTime(long value) { + this(false, value, null); +} + + /** + * Instantiates {@link DateTime} from a {@link Date}. + * + *
<p>
    + * The time zone is interpreted as {@code TimeZone.getDefault()}, which may vary with + * implementation. + *
</p>
    + * + * @param value date and time + */ + public DateTime(Date value) { + this(value.getTime()); +} + + /** + * Instantiates {@link DateTime} from the number of milliseconds since the Unix epoch, and a shift + * from UTC in minutes. + * + * @param value number of milliseconds since the Unix epoch (January 1, 1970, 00:00:00 GMT) + * @param tzShift time zone, represented by the number of minutes off of UTC. + */ + public DateTime(long value, int tzShift) { + this(false, value, tzShift); +} + + /** + * Instantiates {@link DateTime}, which may represent a date-only value, from the number of + * milliseconds since the Unix epoch, and a shift from UTC in minutes. + * + * @param dateOnly specifies if this should represent a date-only value + * @param value number of milliseconds since the Unix epoch (January 1, 1970, 00:00:00 GMT) + * @param tzShift time zone, represented by the number of minutes off of UTC, or {@code null} for + * {@code TimeZone.getDefault()}. + */ + public DateTime(boolean dateOnly, long value, Integer tzShift) { + this.dateOnly = dateOnly; + this.value = value; + this.tzShift = + dateOnly ? 0 : tzShift == null ? TimeZone.getDefault().getOffset(value) / 60000 : tzShift; +} + + /** + * Instantiates {@link DateTime} from an RFC 3339 + * date/time value. + * + *
<p>
    + * Upgrade warning: in prior version 1.17, this method required milliseconds to be exactly 3 + * digits (if included), and did not throw an exception for all types of invalid input values, but + * starting in version 1.18, the parsing done by this method has become more strict to enforce + * that only valid RFC3339 strings are entered, and if not, it throws a + * {@link NumberFormatException}. Also, in accordance with the RFC3339 standard, any number of + * milliseconds digits is now allowed. + *
</p>
    + * + * @param value an RFC 3339 date/time value. + * @since 1.11 + */ + public DateTime(String value) { + // Note, the following refactoring is being considered: Move the implementation of parseRfc3339 + // into this constructor. Implementation of parseRfc3339 can then do + // "return new DateTime(str);". + DateTime dateTime = parseRfc3339(value); + this.dateOnly = dateTime.dateOnly; + this.value = dateTime.value; + this.tzShift = dateTime.tzShift; +} + + /** + * Returns the date/time value expressed as the number of milliseconds since the Unix epoch. + * + *
<p>
    + * If the time zone is specified, this value is normalized to UTC, so to format this date/time + * value, the time zone shift has to be applied. + *

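For example (a sketch assuming this DateTime class is in scope), a value parsed with a +01:00 shift is stored as the UTC instant, with the shift kept separately:

```
DateTime dt = DateTime.parseRfc3339("1970-01-01T01:00:00+01:00");
long utcMillis = dt.getValue();     // 0L - normalized to UTC
int shift = dt.getTimeZoneShift();  // 60 - minutes to re-apply when formatting
String back = dt.toStringRfc3339(); // "1970-01-01T01:00:00.000+01:00"
```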
    + * + * @since 1.5 + */ + public long getValue() { + return value; +} + + /** + * Returns whether this is a date-only value. + * + * @since 1.5 + */ + public boolean isDateOnly() { + return dateOnly; +} + + /** + * Returns the time zone shift from UTC in minutes or {@code 0} for date-only value. + * + * @since 1.5 + */ + public int getTimeZoneShift() { + return tzShift; +} + + /** Formats the value as an RFC 3339 date/time string. */ + public String toStringRfc3339() { + StringBuilder sb = new StringBuilder(); + Calendar dateTime = new GregorianCalendar(GMT); + long localTime = value + (tzShift * 60000L); + dateTime.setTimeInMillis(localTime); + // date + appendInt(sb, dateTime.get(Calendar.YEAR), 4); + sb.append('-'); + appendInt(sb, dateTime.get(Calendar.MONTH) + 1, 2); + sb.append('-'); + appendInt(sb, dateTime.get(Calendar.DAY_OF_MONTH), 2); + if (!dateOnly) { + // time + sb.append('T'); + appendInt(sb, dateTime.get(Calendar.HOUR_OF_DAY), 2); + sb.append(':'); + appendInt(sb, dateTime.get(Calendar.MINUTE), 2); + sb.append(':'); + appendInt(sb, dateTime.get(Calendar.SECOND), 2); + + if (dateTime.isSet(Calendar.MILLISECOND)) { + sb.append('.'); + appendInt(sb, dateTime.get(Calendar.MILLISECOND), 3); +} + // time zone + if (tzShift == 0) { + sb.append('Z'); +} else { + int absTzShift = tzShift; + if (tzShift > 0) { + sb.append('+'); +} else { + sb.append('-'); + absTzShift = -absTzShift; +} + + int tzHours = absTzShift / 60; + int tzMinutes = absTzShift % 60; + appendInt(sb, tzHours, 2); + sb.append(':'); + appendInt(sb, tzMinutes, 2); +} +} + return sb.toString(); +} + + @Override + public String toString() { + return toStringRfc3339(); +} + + /** + * {@inheritDoc} + * + *

    + * A check is added that the time zone is the same. If you ONLY want to check equality of time + * value, check equality on the {@link #getValue()}. + *

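A sketch of the distinction, assuming this DateTime class is in scope:

```
DateTime a = DateTime.parseRfc3339("2020-06-01T12:00:00Z");
DateTime b = DateTime.parseRfc3339("2020-06-01T14:00:00+02:00");
boolean sameInstant = a.getValue() == b.getValue(); // true: same UTC instant
boolean equal = a.equals(b);                        // false: time zone shifts differ (0 vs 120)
```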
    + */ + @Override + public boolean equals(Object o) { + if (o == this) { + return true; +} + if (!(o instanceof DateTime)) { + return false; +} + DateTime other = (DateTime) o; + return dateOnly == other.dateOnly && value == other.value && tzShift == other.tzShift; +} + + @Override + public int hashCode() { + return Arrays.hashCode(new long[] {value, dateOnly ? 1 : 0, tzShift}); +} + + /** + * Parses an RFC3339 date/time value. + * + *

    + * Upgrade warning: in prior version 1.17, this method required milliseconds to be exactly 3 + * digits (if included), and did not throw an exception for all types of invalid input values, but + * starting in version 1.18, the parsing done by this method has become more strict to enforce + * that only valid RFC3339 strings are entered, and if not, it throws a + * {@link NumberFormatException}. Also, in accordance with the RFC3339 standard, any number of + * milliseconds digits is now allowed. + *

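For instance (a sketch, assuming this class is in scope), fractional seconds of any length are renormalized to milliseconds:

```
DateTime oneDigit = DateTime.parseRfc3339("2020-01-01T00:00:00.1Z");       // 100 ms
DateTime sixDigits = DateTime.parseRfc3339("2020-01-01T00:00:00.123456Z"); // truncated to 123 ms
long diff = sixDigits.getValue() - oneDigit.getValue();                    // 23
```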
    + * + *

    + * For the date-only case, the time zone is ignored and the hourOfDay, minute, second, and + * millisecond parameters are set to zero. + *

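A sketch of the date-only path, assuming this class is in scope:

```
DateTime d = DateTime.parseRfc3339("2013-01-01");
boolean dateOnly = d.isDateOnly(); // true
int shift = d.getTimeZoneShift();  // 0 for a date-only value
String s = d.toStringRfc3339();    // "2013-01-01"
// "2013-01-01Z" would throw NumberFormatException: a zone shift without a time is rejected.
```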
    + * + * @param str Date/time string in RFC3339 format + * @throws NumberFormatException if {@code str} doesn't match the RFC3339 standard format; an + * exception is thrown if {@code str} doesn't match {@code RFC3339_REGEX} or if it + * contains a time zone shift but no time. + */ + public static DateTime parseRfc3339(String str) throws NumberFormatException { + Matcher matcher = RFC3339_PATTERN.matcher(str); + if (!matcher.matches()) { + throw new NumberFormatException("Invalid date/time format: " + str); +} + + int year = Integer.parseInt(matcher.group(1)); // yyyy + int month = Integer.parseInt(matcher.group(2)) - 1; // MM + int day = Integer.parseInt(matcher.group(3)); // dd + boolean isTimeGiven = matcher.group(4) != null; // 'T'HH:mm:ss.milliseconds + String tzShiftRegexGroup = matcher.group(9); // 'Z', or time zone shift HH:mm following '+'/'-' + boolean isTzShiftGiven = tzShiftRegexGroup != null; + int hourOfDay = 0; + int minute = 0; + int second = 0; + int milliseconds = 0; + Integer tzShiftInteger = null; + + if (isTzShiftGiven && !isTimeGiven) { + throw new NumberFormatException("Invalid date/time format, cannot specify time zone shift" + + " without specifying time: " + str); +} + + if (isTimeGiven) { + hourOfDay = Integer.parseInt(matcher.group(5)); // HH + minute = Integer.parseInt(matcher.group(6)); // mm + second = Integer.parseInt(matcher.group(7)); // ss + if (matcher.group(8) != null) { // contains .milliseconds? + milliseconds = Integer.parseInt(matcher.group(8).substring(1)); // milliseconds + // The number of digits after the dot may not be 3. Need to renormalize. + int fractionDigits = matcher.group(8).substring(1).length() - 3; + milliseconds = (int) ((float) milliseconds / Math.pow(10, fractionDigits)); +} +} + Calendar dateTime = new GregorianCalendar(GMT); + dateTime.set(year, month, day, hourOfDay, minute, second); + dateTime.set(Calendar.MILLISECOND, milliseconds); + long value = dateTime.getTimeInMillis(); + + if (isTimeGiven && isTzShiftGiven) { + int tzShift; + if (Character.toUpperCase(tzShiftRegexGroup.charAt(0)) == 'Z') { + tzShift = 0; +} else { + tzShift = Integer.parseInt(matcher.group(11)) * 60 // time zone shift HH + + Integer.parseInt(matcher.group(12)); // time zone shift mm + if (matcher.group(10).charAt(0) == '-') { // time zone shift + or - + tzShift = -tzShift; +} + value -= tzShift * 60000L; // e.g. if 1 hour ahead of UTC, subtract an hour to get UTC time +} + tzShiftInteger = tzShift; +} + return new DateTime(!isTimeGiven, value, tzShiftInteger); +} + + /** Appends a zero-padded number to a string builder. 
*/ + private static void appendInt(StringBuilder sb, int num, int numDigits) { + if (num < 0) { + sb.append('-'); + num = -num; +} + int x = num; + while (x > 0) { + x /= 10; + numDigits--; +} + for (int i = 0; i < numDigits; i++) { + sb.append('0'); +} + if (num != 0) { + sb.append(num); +} +} +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/alertmanager/LiveAlertManager.scala b/jvm/src/main/scala/com/harana/modules/alertmanager/LiveAlertManager.scala new file mode 100644 index 0000000..3092088 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/alertmanager/LiveAlertManager.scala @@ -0,0 +1,199 @@ +package com.harana.modules.alertmanager + +import com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.kubernetes.Kubernetes +import io.circe.parser._ +import io.circe.syntax._ +import io.circe.{Decoder, Encoder} +import skuber.Container.Port +import skuber.apps.StatefulSet +import skuber.{ConfigMap, Container, EnvVar, HTTPGetAction, LabelSelector, ObjectMeta, PersistentVolume, PersistentVolumeClaim, Pod, Probe, Protocol, Service, Volume} +import zio.{Task, ZIO, ZLayer} + +object LiveAlertManager { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + http <- ZIO.service[Http] + kubernetes <- ZIO.service[Kubernetes] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveAlertManager(config, http, kubernetes, logger, micrometer) + } +} + +case class LiveAlertManager(config: Config, http: Http, kubernetes: Kubernetes, logger: Logger, micrometer: Micrometer) extends AlertManager { + + override def start(name: String, + storageClassName: String, + replicas: Int = 1) = + for { + version <- config.string("alertmanager.version") + config <- alertManagerConfig(name) + + replicaArgs = if (replicas > 1) + List( + "--cluster.advertise-address=$(POD_IP):9094", + "--cluster.listen-address=0.0.0.0:9094" + ) ++ (0 to replicas).map(i => s"--cluster.peer=$name-{{ $i }}.$name-headless:9094") + else List() + + configMap = ConfigMap(name).withData(Map("alertmanager.yml" -> config)) + service = Service(name, Map("app.kubernetes.io/name" -> name), 9094) + probe = Probe(HTTPGetAction(port = Right("http"), path = "/")) + statefulSet = StatefulSet(name) + .withReplicas(replicas) + .withServiceName(name) + .withLabelSelector(LabelSelector()) + .withVolumeClaimTemplate( + PersistentVolumeClaim( + metadata = ObjectMeta(name = "storage"), + spec = Some(PersistentVolumeClaim.Spec( + accessModes = List(PersistentVolume.AccessMode.ReadWriteOnce), + storageClassName = Some(storageClassName) + )) + ) + ) + .withTemplate(Pod.Template.Spec( + metadata = ObjectMeta(labels = Map()), + spec = Some(Pod.Spec( + nodeSelector = Map("type" -> "core"), + containers = List(Container( + name = "alertmanager", + image = s"quay.io/prometheus/alertmanager:$version", + ports = List(Port(9093, Protocol.TCP, "http")), + env = List(EnvVar("POD_IP", EnvVar.FieldRef("status.podIP", "v1"))), + livenessProbe = Some(probe), + readinessProbe = Some(probe), + args = replicaArgs ++ List( + "--storage.path=/alertmanager", + "--config.file=/etc/alertmanager/alertmanager.yml" + ), + volumeMounts = List( + Volume.Mount("config", "/etc/alertmanager"), + Volume.Mount("storage", "/alertmanager") + ) + )) + )) + )) + } yield () + + + private def alertManagerConfig(name: String): Task[String] = + ZIO.attempt("") + + + def healthy: 
Task[Boolean] = + for { + domain <- config.string("alertmanager.host") + response <- http.get(s"https://$domain/-/healthy").mapError(ex => new Exception(ex.toString)) + } yield response.code() == 200 + + + def ready: Task[Boolean] = + for { + domain <- config.string("alertmanager.host") + response <- http.get(s"https://$domain/-/ready").mapError(ex => new Exception(ex.toString)) + } yield response.code() == 200 + + + def reload: Task[Unit] = + post("-/reload") + + + def status: Task[AlertManagerStatus] = + get[AlertManagerStatus]("api/v2/status") + + + def receivers: Task[List[ReceiverName]] = + get[List[ReceiverName]]("api/v2/receivers") + + + def silences(filters: Set[String] = Set()): Task[List[Silence]] = + get[List[Silence]]("api/v2/silences") + + + def silence(id: SilenceId): Task[Silence] = + get[Silence](s"api/v2/silences/${id.silenceID}") + + + def saveSilence(silence: PostableSilence): Task[SilenceId] = + postWithResponse[PostableSilence, SilenceId]("api/v2/silences", Some(silence)) + + + def deleteSilence(id: SilenceId): Task[Unit] = + delete(s"api/v2/silence/${id.silenceID}") + + + def alerts(active: Boolean = false, + silenced: Boolean = false, + inhibited: Boolean = false, + unprocessed: Boolean = false, + filters: List[String] = List(), + receiver: Option[String] = None): Task[List[Alert]] = + get[List[Alert]](s"api/v2/alerts", + Map( + "active" -> List(active.toString), + "silenced" -> List(silenced.toString), + "inhibited" -> List(inhibited.toString), + "unprocessed" -> List(unprocessed.toString), + "filters" -> filters) ++ (if (receiver.nonEmpty) Map("receiver" -> List(receiver.get)) else Map()) + ) + + def saveAlerts(alerts: List[PostableAlert]): Task[Unit] = + postWithBody[List[PostableAlert]]("api/v2/alerts", Some(alerts)) + + + def alertGroups(active: Boolean = false, + silenced: Boolean = false, + inhibited: Boolean = false, + filters: List[String] = List(), + receiver: Option[String] = None): Task[List[AlertGroup]] = + get[List[AlertGroup]]("api/v2/alerts/group", + Map( + "active" -> List(active.toString), + "silenced" -> List(silenced.toString), + "inhibited" -> List(inhibited.toString), + "filters" -> filters) ++ (if (receiver.nonEmpty) Map("receiver" -> List(receiver.get)) else Map()) + ) + + + private def get[A](url: String, parameters: Map[String, List[String]] = Map())(implicit d: Decoder[A]): Task[A] = + for { + domain <- config.string("alertmanager.host") + json <- http.getAsJson(s"https://$domain/$url", parameters).mapError(ex => new Exception(ex.toString)) + obj <- ZIO.fromEither(decode[A](json.noSpaces)) + } yield obj + + + private def post(url: String, parameters: Map[String, List[String]] = Map()): Task[Unit] = + for { + domain <- config.string("alertmanager.host") + _ <- http.postAsJson(s"https://$domain/$url", params = parameters).mapError(ex => new Exception(ex.toString)) + } yield () + + + private def postWithBody[A](url: String, obj: Option[A], parameters: Map[String, List[String]] = Map())(implicit e: Encoder[A]): Task[Unit] = + for { + domain <- config.string("alertmanager.host") + _ <- http.postAsJson(s"https://$domain/$url", body = obj.map(_.asJson.noSpaces), params = parameters).mapError(ex => new Exception(ex.toString)) + } yield () + + + private def postWithResponse[A, B](url: String, obj: Option[A], parameters: Map[String, List[String]] = Map())(implicit d: Decoder[B], e: Encoder[A]): Task[B] = + for { + domain <- config.string("alertmanager.host") + json <- http.postAsJson(s"https://$domain/$url", body = obj.map(_.asJson.noSpaces), params 
= parameters).mapError(ex => new Exception(ex.toString)) + obj <- ZIO.fromEither(decode[B](json.noSpaces)) + } yield obj + + + private def delete[A](url: String, parameters: Map[String, List[String]] = Map()): Task[Unit] = + for { + domain <- config.string("alertmanager.host") + _ <- http.deleteAsJson(s"https://$domain/$url", parameters).mapError(ex => new Exception(ex.toString)) + } yield () +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/alertmanager/models.scala b/jvm/src/main/scala/com/harana/modules/alertmanager/models.scala new file mode 100644 index 0000000..bcb3c7a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/alertmanager/models.scala @@ -0,0 +1,104 @@ +package com.harana.modules + +import io.circe.generic.JsonCodec +import io.circe.{Decoder, Encoder} + +package object alertmanager { + + implicit val decodeDateTime: Decoder[DateTime] = + Decoder.decodeString.emap { str => Right(DateTime.parseRfc3339(str)) } + + implicit val encodeDateTime: Encoder[DateTime] = + Encoder.encodeString.contramap[DateTime](_.toStringRfc3339) + + + @JsonCodec + case class Alert(labels: Map[String, String], + annotations: Map[String, String], + receivers: List[ReceiverName], + status: AlertStatus, + fingerprint: String, + updatedAt: DateTime, + startsAt: DateTime, + endsAt: DateTime, + generatorURL: String) + + @JsonCodec + case class AlertGroup(labels: Map[String, String], + receiver: ReceiverName, + alerts: List[Alert]) + + @JsonCodec + case class AlertManagerConfig(original: String) + + @JsonCodec + case class AlertManagerStatus(cluster: ClusterStatus, + config: AlertManagerConfig, + uptime: DateTime, + versionInfo: VersionInfo) + + @JsonCodec + case class AlertStatus(inhibitedBy: List[String], + silencedBy: List[String], + state: String) + + @JsonCodec + case class ClusterStatus(name: String, + peers: List[Peer], + status: String) + + @JsonCodec + case class Labels(alertname: String, + labels: Map[String, String]) + + @JsonCodec + case class Matcher(regex: Boolean, + name: String, + value: String) + + @JsonCodec + case class Peer(address: String, + name: String) + + @JsonCodec + case class PostableAlert(labels: Map[String, String], + annotations: Map[String, String], + startsAt: String, + endsAt: String, + generatorURL: String) + + @JsonCodec + case class PostableSilence(id: String, + comment: String, + createdBy: String, + endsAt: String, + startsAt: String, + matchers: List[Matcher]) + + @JsonCodec + case class ReceiverName(name: String) + + @JsonCodec + case class Silence(id: String, + status: SilenceStatus, + updatedAt: DateTime, + comment: String, + createdBy: String, + endsAt: DateTime, + startsAt: DateTime, + matchers: List[Matcher]) + + @JsonCodec + case class SilenceId(silenceID: String) + + @JsonCodec + case class SilenceStatus(state: String) + + @JsonCodec + case class VersionInfo(branch: String, + buildDate: String, + buildUser: String, + goVersion: String, + revision: String, + version: String) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/alluxiofs/AlluxioFs.scala b/jvm/src/main/scala/com/harana/modules/alluxiofs/AlluxioFs.scala new file mode 100644 index 0000000..93b4e19 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/alluxiofs/AlluxioFs.scala @@ -0,0 +1,70 @@ +package com.harana.modules.alluxiofs + +import alluxio.grpc.SetAclAction +import alluxio.security.authorization.AclEntry +import com.harana.sdk.shared.models.HaranaFile +import zio.Task +import zio.macros.accessible + +@accessible +trait 
AlluxioFs { + + def createDirectory(path: String, + createParent: Boolean, + username: Option[String] = None): Task[Unit] + +// def createFile(path: String, +// data: Array[Byte], +// username: Option[String] = None, +// blockSize: Option[Int] = None): Task[Unit] + + def delete(path: String, + recursive: Boolean, + username: Option[String] = None): Task[Unit] + + def exists(path: String, + username: Option[String] = None): Task[Boolean] + + def free(path: String, + username: Option[String] = None): Task[Unit] + + def info(path: String, + username: Option[String] = None): Task[HaranaFile] + + def isDirectory(path: String, + username: Option[String] = None): Task[Boolean] + + def isFile(path: String, + username: Option[String] = None): Task[Boolean] + + def list(path: String, + username: Option[String] = None): Task[List[HaranaFile]] + +// def loadFile(path: String, +// username: Option[String] = None): Task[Array[Byte]] + + def mount(path: String, + ufsPath: String, + username: Option[String] = None): Task[Unit] + + def parent(path: String, + username: Option[String] = None): Task[Option[String]] + + def persist(path: String, + username: Option[String] = None): Task[Unit] + + def rename(source: String, + destination: String, + username: Option[String] = None): Task[Unit] + + def search(path: String, query: String): Task[List[HaranaFile]] + + def setAcl(path: String, + action: SetAclAction, + entries: List[AclEntry], + username: Option[String] = None): Task[Unit] + + def unmount(path: String, + username: Option[String] = None): Task[Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/alluxiofs/LiveAlluxioFs.scala b/jvm/src/main/scala/com/harana/modules/alluxiofs/LiveAlluxioFs.scala new file mode 100644 index 0000000..381afce --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/alluxiofs/LiveAlluxioFs.scala @@ -0,0 +1,210 @@ +package com.harana.modules.alluxiofs + +import alluxio.client.file.URIStatus +import alluxio.conf.{Configuration, PropertyKey} +import alluxio.grpc.{CreateDirectoryPOptions, DeletePOptions, SetAclAction} +import alluxio.security.authorization.AclEntry +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.sdk.shared.models.HaranaFile +import org.apache.commons.io.IOUtils +import zio.{Task, ZIO, ZLayer} + +import scala.jdk.CollectionConverters._ + +object LiveAlluxioFs { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveAlluxioFs(config, logger, micrometer) + } +} + +case class LiveAlluxioFs(config: Config, logger: Logger, micrometer: Micrometer) extends AlluxioFs { + + private val alluxioProperties = for { + hosts <- config.listString("alluxio.hosts", List()) + port <- config.long("alluxio.port", 19998) + properties = Configuration.global().copyProperties() + addresses = hosts.map(host => s"$host:$port").mkString(",") + _ = properties.set(PropertyKey.SECURITY_AUTHENTICATION_TYPE, "NOSASL") + _ = properties.set(PropertyKey.MASTER_RPC_ADDRESSES, addresses) + _ <- logger.info(s"Connecting to hosts: $addresses") + } yield properties + + + def createDirectory(path: String, + createParent: Boolean, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + options = CreateDirectoryPOptions.newBuilder().setRecursive(createParent).build() + _ <- 
ZIO.attempt(fs.createDirectory(uri(path), options)) + } yield () + +// FIXME +// def createFile(path: String, +// data: Array[Byte], +// username: Option[String] = None, +// blockSize: Option[Int] = None): Task[Unit] = +// for { +// properties <- alluxioProperties +// fs <- alluxioFs(properties, username) +// _ <- ZIO.attempt(fs.createFile(uri(path))).acquireReleaseWith(closeStream)(os => io(os.write(data)) +// ) +// } yield () + + + def delete(path: String, + recursive: Boolean, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + options = DeletePOptions.newBuilder().setRecursive(true).build() + _ <- ZIO.attempt(fs.delete(uri(path), options)) + } yield () + + + def exists(path: String, + username: Option[String] = None): Task[Boolean] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + result <- ZIO.attempt(fs.exists(uri(path))) + } yield result + + + def free(path: String, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + result <- ZIO.attempt(fs.free(uri(path))) + } yield result + + + def info(path: String, + username: Option[String] = None): Task[HaranaFile] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + result <- ZIO.attempt(fs.listStatus(uri(path)).asScala.map(toDataFile).head) + } yield result + + + def isDirectory(path: String, + username: Option[String] = None): Task[Boolean] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + result <- ZIO.attempt(fs.getStatus(uri(path)).isFolder) + } yield result + + + def isFile(path: String, username: Option[String] = None): Task[Boolean] = + isDirectory(path).map(!_) + + + def list(path: String, + username: Option[String] = None): Task[List[HaranaFile]] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + result <- ZIO.attempt(fs.listStatus(uri(path)).asScala.toList.map(toDataFile)) + } yield result + + +// FIXME +// def loadFile(path: String, +// username: Option[String] = None): Task[Array[Byte]] = +// for { +// properties <- alluxioProperties +// fs <- alluxioFs(properties, username) +// result <- ZIO.attempt(fs.openFile(uri(path))).acquireReleaseWithAuto(closeStream)(is => io(IOUtils.toByteArray(is))) +// } yield result + + + def mount(path: String, + ufsPath: String, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + _ <- ZIO.attempt(fs.mount(uri(path), uri(ufsPath))) + } yield () + + + def parent(path: String, + username: Option[String] = None): Task[Option[String]] = + io(Option(uri(path).getParent).map(_.getPath)) + + + def persist(path: String, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + _ <- ZIO.attempt(fs.persist(uri(path))) + } yield () + + + def rename(source: String, + destination: String, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + _ <- ZIO.attempt(fs.rename(uri(source), uri(destination))) + } yield () + + + def search(path: String, + query: String): Task[List[HaranaFile]] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties) +// _ <- ZIO.from(fs.rename(uri(source), uri(destination))) + } yield List() + + + def setAcl(path: String, + action: SetAclAction, 
+ entries: List[AclEntry], + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + _ <- ZIO.attempt(fs.setAcl(uri(path), action, entries.asJava)) + } yield () + + + def unmount(path: String, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + _ <- ZIO.attempt(fs.unmount(uri(path))) + } yield () + + + private def toDataFile(uri: URIStatus): HaranaFile = { + null + } + // private def toDataFile(uri: URIStatus) = { +// uri. +// +// DataFile( +// name = file.getName.getBaseName, +// extension = if (StringUtils.isEmpty(file.getName.getExtension)) None else Some(file.getName.getExtension), +// isFolder = file.isFolder, +// lastModified = Instant.ofEpochMilli(file.getContent.getLastModifiedTime), +// size = size(file), +// tags = List() +// ) +// } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/alluxiofs/package.scala b/jvm/src/main/scala/com/harana/modules/alluxiofs/package.scala new file mode 100644 index 0000000..0bcebc4 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/alluxiofs/package.scala @@ -0,0 +1,40 @@ +package com.harana.modules + +import alluxio.AlluxioURI +import alluxio.client.file.FileSystem +import alluxio.conf.{AlluxioProperties, InstancedConfiguration, PropertyKey} +import alluxio.exception.AlluxioException +import zio.{IO, ZIO} + +import java.io.{InputStream, OutputStream} + +package object alluxiofs { + + def alluxioFs(properties: AlluxioProperties, username: Option[String] = None) = + ZIO.succeed { + FileSystem.Factory.create( + username match { + case Some(u) => + val p = properties.copy() + p.set(PropertyKey.SECURITY_LOGIN_USERNAME, u) + new InstancedConfiguration(p) + + case None => + new InstancedConfiguration(properties) + } + ) + } + + def closeStream(is: InputStream) = + ZIO.succeed(is.close()) + + def closeStream(os: OutputStream) = + ZIO.succeed(os.close()) + + def io[A](fn: => A): IO[AlluxioException, A] = + ZIO.from(fn).refineToOrDie[AlluxioException] + + def uri(path: String) = + new AlluxioURI(path) + +} diff --git a/jvm/src/main/scala/com/harana/modules/argo/Argo.scala b/jvm/src/main/scala/com/harana/modules/argo/Argo.scala new file mode 100644 index 0000000..db54a28 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/Argo.scala @@ -0,0 +1,35 @@ +package com.harana.modules.argo + +import com.harana.modules.argo.events.EventSource.EventSource +import com.harana.modules.argo.events.Rollout.Rollout +import com.harana.modules.argo.events.Sensor.Sensor +import com.harana.modules.argo.workflows.Workflow.Workflow +import com.harana.modules.argo.workflows.WorkflowTemplate.WorkflowTemplate +import skuber.api.client.KubernetesClient +import zio.Task +import zio.macros.accessible + +@accessible +trait Argo { + + def createOrUpdateEventSource(namespace: String, eventSource: EventSource, client: Option[KubernetesClient] = None): Task[EventSource] + def deleteEventSource(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Unit] + def existsEventSource(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Boolean] + + def createOrUpdateRollout(namespace: String, rollout: Rollout, client: Option[KubernetesClient] = None): Task[Rollout] + def deleteRollout(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Unit] + def existsRollout(namespace: String, name: String, client: 
Option[KubernetesClient] = None): Task[Boolean] + + def createOrUpdateSensor(namespace: String, sensor: Sensor, client: Option[KubernetesClient] = None): Task[Sensor] + def deleteSensor(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Unit] + def existsSensor(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Boolean] + + def createOrUpdateWorkflow(namespace: String, workflow: Workflow, client: Option[KubernetesClient] = None): Task[Workflow] + def deleteWorkflow(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Unit] + def existsWorkflow(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Boolean] + + def createOrUpdateWorkflowTemplate(namespace: String, template: WorkflowTemplate, client: Option[KubernetesClient] = None): Task[WorkflowTemplate] + def deleteWorkflowTemplate(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Unit] + def existsWorkflowTemplate(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Boolean] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/LiveArgo.scala b/jvm/src/main/scala/com/harana/modules/argo/LiveArgo.scala new file mode 100644 index 0000000..36bf442 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/LiveArgo.scala @@ -0,0 +1,126 @@ +package com.harana.modules.argo + +import com.harana.modules.argo.events.EventSource._ +import com.harana.modules.argo.events.Rollout._ +import com.harana.modules.argo.events.Sensor._ +import com.harana.modules.argo.workflows.Workflow._ +import com.harana.modules.argo.workflows.WorkflowTemplate._ +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.kubernetes.Kubernetes +import play.api.libs.json.Format +import skuber.ResourceDefinition +import skuber.api.client.{KubernetesClient, LoggingContext} +import zio.{Task, ZIO, ZLayer} + +import scala.reflect.ClassTag + +object LiveArgo { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + kubernetes <- ZIO.service[Kubernetes] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveArgo(config, kubernetes, logger, micrometer) + } +} + +case class LiveArgo(config: Config, kubernetes: Kubernetes, logger: Logger, micrometer: Micrometer) extends Argo { + + def createOrUpdateEventSource(namespace: String, eventSource: EventSource, client: Option[KubernetesClient]): Task[EventSource] = + upsert[EventSource](namespace, eventSource, client) + + + def deleteEventSource(namespace: String, name: String, client: Option[KubernetesClient]): Task[Unit] = + delete[EventSource](namespace, name, client) + + + def existsEventSource(namespace: String, name: String, client: Option[KubernetesClient]): Task[Boolean] = + exists[EventSource](namespace, name, client) + + + def createOrUpdateRollout(namespace: String, rollout: Rollout, client: Option[KubernetesClient]): Task[Rollout] = + upsert[Rollout](namespace, rollout, client) + + + def deleteRollout(namespace: String, name: String, client: Option[KubernetesClient]): Task[Unit] = + delete[Rollout](namespace, name, client) + + + def existsRollout(namespace: String, name: String, client: Option[KubernetesClient]): Task[Boolean] = + exists[Rollout](namespace, name, client) + + + def createOrUpdateSensor(namespace: String, sensor: Sensor, client: Option[KubernetesClient]): 
Task[Sensor] = + upsert[Sensor](namespace, sensor, client) + + + def deleteSensor(namespace: String, name: String, client: Option[KubernetesClient]): Task[Unit] = + delete[Sensor](namespace, name, client) + + + def existsSensor(namespace: String, name: String, client: Option[KubernetesClient]): Task[Boolean] = + exists[Sensor](namespace, name, client) + + + def createOrUpdateWorkflow(namespace: String, workflow: Workflow, client: Option[KubernetesClient]): Task[Workflow] = + upsert[Workflow](namespace, workflow, client) + + + def deleteWorkflow(namespace: String, name: String, client: Option[KubernetesClient]): Task[Unit] = + delete[Workflow](namespace, name, client) + + + def existsWorkflow(namespace: String, name: String, client: Option[KubernetesClient]): Task[Boolean] = + exists[Workflow](namespace, name, client) + + + def createOrUpdateWorkflowTemplate(namespace: String, template: WorkflowTemplate, client: Option[KubernetesClient]): Task[WorkflowTemplate] = + upsert[WorkflowTemplate](namespace, template, client) + + + def deleteWorkflowTemplate(namespace: String, name: String, client: Option[KubernetesClient]): Task[Unit] = + delete[WorkflowTemplate](namespace, name, client) + + + def existsWorkflowTemplate(namespace: String, name: String, client: Option[KubernetesClient]): Task[Boolean] = + exists[WorkflowTemplate](namespace, name, client) + + + private def upsert[A <: skuber.ObjectResource](namespace: String, resource: A, client: Option[KubernetesClient])(implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext, ct: ClassTag[A]): Task[A] = + for { + client <- ZIO.fromOption(client).orElse(kubernetes.newClient) + exists <- kubernetes.exists[A](client, namespace, resource.name) + resource <- if (exists) kubernetes.create[A](client, namespace, resource) else kubernetes.update[A](client, namespace, resource) + _ <- kubernetes.close(client) + } yield resource + + + private def rename[A <: skuber.ObjectResource](namespace: String, oldName: String, newName: String, client: Option[KubernetesClient])(implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext, ct: ClassTag[A]): Task[Unit] = + for { + client <- ZIO.fromOption(client).orElse(kubernetes.newClient) + exists <- kubernetes.exists[A](client, namespace, oldName) + resource <- kubernetes.get[A](client, namespace, oldName) + _ <- ZIO.when(exists)(kubernetes.delete[A](client, namespace, oldName)) + _ <- ZIO.when(exists)(kubernetes.create[A](client, namespace, resource.get)) + _ <- kubernetes.close(client) + } yield () + + + private def delete[A <: skuber.ObjectResource](namespace: String, name: String, client: Option[KubernetesClient])(implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext, ct: ClassTag[A]): Task[Unit] = + for { + client <- ZIO.fromOption(client).orElse(kubernetes.newClient) + _ <- kubernetes.delete[A](client, namespace, name) + _ <- kubernetes.close(client) + } yield () + + + private def exists[A <: skuber.ObjectResource](namespace: String, name: String, client: Option[KubernetesClient])(implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext, ct: ClassTag[A]): Task[Boolean] = + for { + client <- ZIO.fromOption(client).orElse(kubernetes.newClient) + exists <- kubernetes.get[A](client, namespace, name).map(_.nonEmpty) + } yield exists + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/events/EventSource.scala b/jvm/src/main/scala/com/harana/modules/argo/events/EventSource.scala new file mode 100644 index 0000000..3062554 --- 
/dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/events/EventSource.scala @@ -0,0 +1,204 @@ +package com.harana.modules.argo.events + +import com.harana.modules.argo._ +import io.circe.generic.JsonCodec +import play.api.libs.json.{Format, Json} +import skuber.apiextensions.CustomResourceDefinition +import skuber.{CustomResource, ListResource, ResourceDefinition} + +object EventSource { + + type EventSource = CustomResource[Spec, Status] + type EventSourceList = ListResource[EventSource] + + implicit lazy val calendarFmt: Format[Calendar] = Json.format[Calendar] + implicit lazy val catchupConfigurationFmt: Format[CatchupConfiguration] = Json.format[CatchupConfiguration] + implicit lazy val configMapPersistenceFmt: Format[ConfigMapPersistence] = Json.format[ConfigMapPersistence] + implicit lazy val eventPersistenceFmt: Format[EventPersistence] = Json.format[EventPersistence] + implicit lazy val eventSourceFilterFmt: Format[EventSourceFilter] = Json.format[EventSourceFilter] + implicit lazy val fileFmt: Format[File] = Json.format[File] + implicit lazy val genericFmt: Format[Generic] = Json.format[Generic] + implicit lazy val githubFmt: Format[Github] = Json.format[Github] + implicit lazy val gitlabFmt: Format[Gitlab] = Json.format[Gitlab] + implicit lazy val hdfsFmt: Format[Hdfs] = Json.format[Hdfs] + implicit lazy val kafkaFmt: Format[Kafka] = Json.format[Kafka] + implicit lazy val redisFmt: Format[Redis] = Json.format[Redis] + implicit lazy val resourceFmt: Format[Resource] = Json.format[Resource] + implicit lazy val slackFmt: Format[Slack] = Json.format[Slack] + implicit lazy val snsFmt: Format[SNS] = Json.format[SNS] + implicit lazy val specFmt: Format[Spec] = Json.format[Spec] + implicit lazy val sqsFmt: Format[SQS] = Json.format[SQS] + implicit lazy val stripeFmt: Format[Stripe] = Json.format[Stripe] + implicit lazy val statusFmt: Format[Status] = Json.format[Status] + implicit lazy val watchPathConfigFmt: Format[WatchPathConfig] = Json.format[WatchPathConfig] + implicit lazy val webhookFmt: Format[Webhook] = Json.format[Webhook] + + implicit lazy val resourceDefinition: ResourceDefinition[EventSource] = ResourceDefinition[EventSource]("EventSource", "argoproj.io", "v1alpha1") + val crd = CustomResourceDefinition[EventSource] + + def apply(name: String, spec: Spec) = CustomResource[Spec, Status](spec).withName(name) + + @JsonCodec + case class Calendar(exclusionDates: List[String], + interval: Option[String] = None, + schedule: Option[String] = None, + timezone: Option[String] = None, + metadata: Map[String, String] = Map(), + eventPersistence: Option[EventPersistence] = None, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class CatchupConfiguration(enabled: Boolean, + maxDuration: String) + + @JsonCodec + case class ConfigMapPersistence(name: String, + createIfNotExist: Boolean) + + @JsonCodec + case class EventPersistence(catchupConfiguration: CatchupConfiguration, + configMapPersistence: ConfigMapPersistence) + + @JsonCodec + case class EventSourceFilter(expression: String) + + + @JsonCodec + case class File(eventType: String, + watchPathConfig: WatchPathConfig, + polling: Boolean = false, + metadata: Map[String, String] = Map(), + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Generic(url: String, + config: Option[String] = None, + insecure: Boolean = false, + jsonBody: Boolean = true, + metadata: Map[String, String] = Map(), + authSecret: Option[String] = None, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case 
class Github(owner: String, + repository: String, + endpoint: String, + port: Long, + url: Option[String] = None, + events: List[String], + apiSecret: String, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Gitlab(projectId: String, + endpoint: String, + port: Long, + url: Option[String] = None, + event: String, + apiSecret: String, + baseUrl: String, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Hdfs(directory: String, + `type`: String, + path: String, + addresses: List[String], + hdfsUser: String, + krbCCacheSecret: Option[String] = None, + krbKeytabSecret: Option[String] = None, + krbUsername: Option[String] = None, + krbRealm: Option[String] = None, + krbConfigConfigMap: Option[String] = None, + krbServicePrincipalName: Option[String] = None, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Kafka(url: String, + topic: String, + partition: Int, + backOffDuration: Int, + backOffSteps: Int, + backOffFactor: Int, + backOffJitter: Int, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Redis(hostAddress: String, + hostPasswordSecret: Option[String] = None, + db: Option[Int] = None, + channels: List[String], + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Resource(namespace: String, + group: Option[String] = None, + version: Option[String] = None, + resource: String, + `type`: String, + filters: Map[String, String], + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Slack(endpoint: String, + port: Long, + token: String, + signingSecret: Option[String] = None, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class SNS(endpoint: String, + port: Long, + url: Option[String] = None, + accessKeySecret: Option[String] = None, + secretKeySecret: Option[String] = None, + region: Option[String] = None, + filter: Option[EventSourceFilter] = None) + + case class Spec(eventBusName: String, + calendar: Map[String, Calendar] = Map(), + file: Map[String, File] = Map(), + generic: Map[String, Generic] = Map(), + github: Map[String, Github] = Map(), + gitlab: Map[String, Gitlab] = Map(), + hdfs: Map[String, Hdfs] = Map(), + kafka: Map[String, Kafka] = Map(), + redis: Map[String, Redis] = Map(), + replicas: Int = 1, + resource: Map[String, Resource] = Map(), + service: Option[Service] = None, + slack: Map[String, Slack] = Map(), + sns: Map[String, SNS] = Map(), + sqs: Map[String, SQS] = Map(), + stripe: Map[String, Stripe] = Map(), + template: Option[Template] = None, + webhook: Map[String, Webhook] = Map()) + + @JsonCodec + case class SQS(accessKeySecret: Option[String] = None, + secretKeySecret: Option[String] = None, + region: Option[String] = None, + queue: String, + waitTimeSeconds: Option[Int], + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Stripe(endpoint: String, + port: Long, + url: Option[String] = None, + apiKeySecret: String, + eventFilters: List[String], + filter: Option[EventSourceFilter] = None) + + case class Status(createdAt: Time) + + @JsonCodec + case class WatchPathConfig(directory: Option[String] = None, + path: Option[String] = None, + pathRegexp: Option[String] = None) + + @JsonCodec + case class Webhook(endpoint: String, + port: Long, + filter: Option[EventSourceFilter] = None) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/events/Rollout.scala b/jvm/src/main/scala/com/harana/modules/argo/events/Rollout.scala new file mode 100644 index 0000000..ed840f6 --- /dev/null +++ 
b/jvm/src/main/scala/com/harana/modules/argo/events/Rollout.scala @@ -0,0 +1,101 @@ +package com.harana.modules.argo.events + +import com.harana.modules.argo._ +import play.api.libs.json.{Format, Json} +import skuber.apiextensions.CustomResourceDefinition +import skuber.{CustomResource, ListResource, ResourceDefinition} + +object Rollout { + + type Rollout = CustomResource[Spec, Status] + type RolloutList = ListResource[Rollout] + + implicit lazy val analysisFmt: Format[Analysis] = Json.format[Analysis] + implicit lazy val blueGreenFmt: Format[BlueGreen] = Json.format[BlueGreen] + implicit lazy val blueGreenStatusFmt: Format[BlueGreenStatus] = Json.format[BlueGreenStatus] + implicit lazy val canaryFmt: Format[Canary] = Json.format[Canary] + implicit lazy val canaryStatusFmt: Format[CanaryStatus] = Json.format[CanaryStatus] + implicit lazy val canaryStepFmt: Format[CanaryStep] = Json.format[CanaryStep] + implicit lazy val pauseConditionFmt: Format[PauseCondition] = Json.format[PauseCondition] + implicit lazy val rolloutConditionFmt: Format[RolloutCondition] = Json.format[RolloutCondition] + implicit lazy val specFmt: Format[Spec] = Json.format[Spec] + implicit lazy val statusFmt: Format[Status] = Json.format[Status] + implicit lazy val strategyFmt: Format[Strategy] = Json.format[Strategy] + + implicit lazy val resourceDefinition: ResourceDefinition[Rollout] = ResourceDefinition[Rollout]("Rollout", "argoproj.io", "v1alpha1") + val crd = CustomResourceDefinition[Rollout] + + def apply(name: String, spec: Spec) = CustomResource[Spec, Status](spec).withName(name) + + case class Analysis(successfulRunHistoryLimit: Option[Int] = None, + unsuccessfulRunHistoryLimit: Option[Int] = None) + + case class BlueGreen(activeService: String, + previewService: String, + previewReplicaCount: Option[Int], + autoPromotionEnabled: Option[Boolean], + autoPromotionSeconds: Option[Int], + scaleDownDelaySeconds: Option[Int], + scaleDownDelayRevisionLimit: Option[Int]) + + case class BlueGreenStatus(activeSelector: String, + previousActiveSelector: String, + previewSelector: String, + scaleDownDelayStartTime: Time, + scaleUpPreviewCheckPoint: Boolean) + + case class Canary(stableService: String, + canaryService: String, + steps: List[CanaryStep] = List(), + maxSurge: Option[String] = None, + maxUnavailable: Option[String] = None) + + case class CanaryStatus(currentBackgroundAnalysisRun: String, + currentExperiment: String, + currentStepAnalysisRun: String, + stableRS: String) + + case class CanaryStep(weight: Int, + pause: Option[Int]) + + case class PauseCondition(reason: Option[String] = None, + startTime: Time) + + case class RolloutCondition(lastTransitionTime: Time, + lastUpdateTime: Time, + message: String, + reason: String, + status: String, + `type`: String) + + case class Spec(analysis: Option[Analysis] = None, + minReadySeconds: Option[Int] = None, + paused: Boolean = false, + progressDeadlineAbort: Boolean = false, + progressDeadlineSeconds: Option[Int] = None, + replicas: Option[Int] = Some(1), + restartAt: Option[String] = None, + revisionHistoryLimit: Option[Int] = None, + selector: Option[NodeSelector] = None, + strategy: Option[Strategy] = None) + + case class Status(abort: Option[Boolean] = None, + pauseConditions: List[PauseCondition] = List(), + controllerPause: Option[Boolean] = None, + currentPodHash: Option[String] = None, + replicas: Option[Int] = None, + updatedReplicas: Option[Int] = None, + readyReplicas: Option[Int] = None, + availableReplicas: Option[Int] = None, + currentStepIndex: 
Option[Int] = None, + collisionCount: Option[Int] = None, + observedGeneration: Option[String] = None, + conditions: List[RolloutCondition] = List(), + canary: Option[CanaryStatus] = None, + blueGreen: Option[BlueGreenStatus] = None, + HPAReplicas: Option[Int] = None, + selector: Option[String] = None) + + case class Strategy(blueGreen: Option[BlueGreen] = None, + canary: Option[Canary] = None) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/events/Sensor.scala b/jvm/src/main/scala/com/harana/modules/argo/events/Sensor.scala new file mode 100644 index 0000000..fe02bb3 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/events/Sensor.scala @@ -0,0 +1,152 @@ +package com.harana.modules.argo.events + +import com.harana.modules.argo._ +import com.harana.modules.argo.events.Trigger.{HttpTrigger, K8STrigger, SlackTrigger} +import com.harana.modules.argo.workflows.Workflow +import play.api.libs.json.{Format, Json} +import skuber.apiextensions.CustomResourceDefinition +import skuber.{CustomResource, ListResource, ResourceDefinition} + +object Sensor { + + type Sensor = CustomResource[Spec, Status] + type SensorList = ListResource[Sensor] + + implicit lazy val conditionsResetCriteriaFmt: Format[ConditionsResetCriteria] = Json.format[ConditionsResetCriteria] + implicit lazy val conditionsResetByTimeFmt: Format[ConditionsResetByTime] = Json.format[ConditionsResetByTime] + implicit lazy val dataFilterFmt: Format[DataFilter] = Json.format[DataFilter] + implicit lazy val eventContextFmt: Format[EventContext] = Json.format[EventContext] + implicit lazy val eventDependencyFilterFmt: Format[EventDependencyFilter] = Json.format[EventDependencyFilter] + implicit lazy val eventDependencyFmt: Format[EventDependency] = Json.format[EventDependency] + implicit lazy val exprFilterFmt: Format[ExprFilter] = Json.format[ExprFilter] + implicit lazy val httpFmt: Format[Http] = Json.format[Http] + implicit lazy val k8sResourceFmt: Format[K8SResource] = Json.format[K8SResource] + implicit lazy val parameterFmt: Format[Parameter] = Json.format[Parameter] + implicit lazy val parameterSourceFmt: Format[ParameterSource] = Json.format[ParameterSource] + implicit lazy val resourceLabelsPolicyFmt: Format[ResourceLabelsPolicy] = Json.format[ResourceLabelsPolicy] + implicit lazy val specFmt: Format[Spec] = Json.format[Spec] + implicit lazy val statusFmt: Format[Status] = Json.format[Status] + implicit lazy val statusPolicyFmt: Format[StatusPolicy] = Json.format[StatusPolicy] + implicit lazy val subscriptionFmt: Format[Subscription] = Json.format[Subscription] + implicit lazy val timeFmt: Format[Time] = Json.format[Time] + implicit lazy val timeFilterFmt: Format[TimeFilter] = Json.format[TimeFilter] + implicit lazy val triggerTemplateFmt: Format[TriggerTemplate] = Json.format[TriggerTemplate] + implicit lazy val triggerParameterFmt: Format[TriggerParameter] = Json.format[TriggerParameter] + implicit lazy val triggerParameterSourceFmt: Format[TriggerParameterSource] = Json.format[TriggerParameterSource] + implicit lazy val triggerPolicyFmt: Format[TriggerPolicy] = Json.format[TriggerPolicy] + implicit lazy val triggerFmt: Format[Trigger] = Json.format[Trigger] + + implicit lazy val resourceDefinition: ResourceDefinition[Sensor] = ResourceDefinition[Sensor]("Sensor", "argoproj.io", "v1alpha1") + val crd = CustomResourceDefinition[Sensor] + + def apply(name: String, spec: Spec) = CustomResource[Spec, Status](spec) + .withLabels(("sensors.argoproj.io/sensor-controller-instanceid", 
"argo")) + .withName(name) + + case class ConditionsResetCriteria(byTime: ConditionsResetByTime) + + case class ConditionsResetByTime(cron: String, + timezone: Option[String] = None) + + case class DataFilter(path: String, + `type`: JSONType, + value: List[String] = List(), + comparator: Option[Comparator] = None, + template: Option[String] = None) + + case class EventContext(id: String, + source: String, + specversion: String, + `type`: String, + datacontenttype: String, + subject: String, + time: Time) + + case class EventDependencyFilter(time: Option[TimeFilter] = None, + context: Option[EventContext] = None, + data: List[DataFilter] = List(), + exprs: List[ExprFilter] = List()) + + case class EventDependency(name: String, + eventSourceName: String, + eventName: String, + filters: Option[EventDependencyFilter] = None) + + case class ExprFilter(expr: String, + fields: List[PayloadField]) + + case class Http(port: Int) + + case class K8SResource(apiVersion: String, + kind: String, + metadata: ObjectMetadata, + spec: Workflow.Spec) + + case class Parameter(dest: String, + action: Option[String] = None, + src: ParameterSource) + + case class ParameterSource(contextKey: Option[String] = None, + dataKey: Option[String] = None, + event: String, + value: Option[String] = None) + + case class Policy(backoff: Backoff, + errorOnBackoffTimeout: Boolean, + resourceLabels: ResourceLabelsPolicy) + + case class ResourceLabelsPolicy(labels: String) + + case class Spec(template: Option[Template] = None, + dependencies: List[EventDependency] = List(), + errorOnFailedRound: Option[Boolean] = None, + eventBusName: Option[String] = None, + replicas: Option[Int] = None, + subscription: Option[Subscription] = None, + triggers: List[Trigger] = List()) + + case class Status(completedAt: Option[Time] = None, + lastCycleTime: Time, + message: Option[String] = None, + nodes: Option[NodeStatus] = None, + phase: String, + resources: ObjectResource, + startedAt: Option[Time] = None, + triggerCycleCount: Option[Int] = None, + triggerCycleStatus: Int) + + case class Subscription(http: Option[Http] = None) + + case class TimeFilter(start: String, + stop: String) + + case class Trigger(template: TriggerTemplate, + parameters: List[TriggerParameter] = List(), + policy: Option[TriggerPolicy] = None, + retryStrategy: Option[Backoff] = None, + rateLimit: Option[RateLimit] = None) + + case class TriggerTemplate(name: String, + conditions: Option[String] = None, + http: Option[HttpTrigger] = None, + k8s: Option[K8STrigger] = None, + slack: Option[SlackTrigger] = None, + conditionsReset: List[ConditionsResetCriteria] = List()) + + + case class TriggerParameter(src: TriggerParameterSource, + dest: String, + action: TriggerParameterOption) + + case class TriggerParameterSource(dependencyName: String, + contextKey: String, + contextTemplate: String, + dataKey: String, + dataTemplate: String, + value: String) + + type TriggerParameterOption = String + + case class TriggerPolicy(k8s: Option[K8SResourcePolicy] = None, + status: Option[StatusPolicy] = None) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/events/Trigger.scala b/jvm/src/main/scala/com/harana/modules/argo/events/Trigger.scala new file mode 100644 index 0000000..6f2c73d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/events/Trigger.scala @@ -0,0 +1,37 @@ +package com.harana.modules.argo.events + +import com.harana.modules.argo._ +import com.harana.modules.argo.events.Sensor.{K8SResource, TriggerParameter} +import 
play.api.libs.json.{Format, Json} + +object Trigger { + + implicit lazy val httpTriggerFmt: Format[HttpTrigger] = Json.format[HttpTrigger] + implicit lazy val k8sTriggerFmt: Format[K8STrigger] = Json.format[K8STrigger] + implicit lazy val k8sSourceFmt: Format[K8SSource] = Json.format[K8SSource] + implicit lazy val slackTriggerFmt: Format[SlackTrigger] = Json.format[SlackTrigger] + + case class K8STrigger(group: String, + version: String, + resource: String, + action: String, + source: K8SSource, + parameters: List[Parameter] = List()) + + case class K8SSource(resource: K8SResource) + + case class HttpTrigger(url: String, + payload: List[TriggerParameter] = List(), + tls: Option[TLSConfig] = None, + method: Option[String] = None, + parameters: List[TriggerParameter] = List(), + timeout: Option[Int] = None, + basicAuth: Option[BasicAuth] = None, + headers: Map[String, String] = Map(), + secureHeaders: List[SecureHeader] = List()) + + case class SlackTrigger(slackToken: SecretKeySelector, + channel: Option[String] = None, + message: Option[String] = None, + parameters: List[TriggerParameter] = List()) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/package.scala b/jvm/src/main/scala/com/harana/modules/argo/package.scala new file mode 100644 index 0000000..f89536a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/package.scala @@ -0,0 +1,286 @@ +package com.harana.modules + +import play.api.libs.json.{Format, Json} +import skuber.Security.{SELinuxOptions, Sysctl} +import skuber.{LocalObjectReference, PodSecurityContext} + +package object argo { + + implicit lazy val affinityFmt: Format[Affinity] = Json.format[Affinity] + implicit lazy val awsElasticBlockStoreVolumeSourceFmt: Format[AWSElasticBlockStoreVolumeSource] = Json.format[AWSElasticBlockStoreVolumeSource] + implicit lazy val backoffFmt: Format[Backoff] = Json.format[Backoff] + implicit lazy val basicAuthFmt: Format[BasicAuth] = Json.format[BasicAuth] + implicit lazy val configMapKeySelectorFmt: Format[ConfigMapKeySelector] = Json.format[ConfigMapKeySelector] + implicit lazy val configMapVolumeSourceFmt: Format[ConfigMapVolumeSource] = Json.format[ConfigMapVolumeSource] + implicit lazy val containerFmt: Format[Container] = Json.format[Container] + implicit lazy val environmentVariableFmt: Format[EnvironmentVariable] = Json.format[EnvironmentVariable] + implicit lazy val fieldsV1Fmt: Format[FieldsV1] = Json.format[FieldsV1] + implicit lazy val hostPathVolumeSourceFmt: Format[HostPathVolumeSource] = Json.format[HostPathVolumeSource] + implicit lazy val keyToPathFmt: Format[KeyToPath] = Json.format[KeyToPath] + implicit lazy val k8sResourcePolicyFmt: Format[K8SResourcePolicy] = Json.format[K8SResourcePolicy] + implicit lazy val labelSelectorRequirementFmt: Format[LabelSelectorRequirement] = Json.format[LabelSelectorRequirement] + implicit lazy val labelSelectorFmt: Format[LabelSelector] = Json.format[LabelSelector] + implicit lazy val localObjectReferenceFmt: Format[LocalObjectReference] = Json.format[LocalObjectReference] + implicit lazy val managedFieldsEntryFmt: Format[ManagedFieldsEntry] = Json.format[ManagedFieldsEntry] + implicit lazy val microTimeFmt: Format[MicroTime] = Json.format[MicroTime] + implicit lazy val nfsVolumeSourceFmt: Format[NFSVolumeSource] = Json.format[NFSVolumeSource] + implicit lazy val nodeAffinityFmt: Format[NodeAffinity] = Json.format[NodeAffinity] + implicit lazy val nodeStatusFmt: Format[NodeStatus] = Json.format[NodeStatus] + implicit lazy val 
nodeSelectorRequirementFmt: Format[NodeSelectorRequirement] = Json.format[NodeSelectorRequirement] + implicit lazy val nodeSelectorTermFmt: Format[NodeSelectorTerm] = Json.format[NodeSelectorTerm] + implicit lazy val objectMetaFmt: Format[ObjectMetadata] = Json.format[ObjectMetadata] + implicit lazy val nodeSelectorFmt: Format[NodeSelector] = Json.format[NodeSelector] + implicit lazy val objectResourceFmt: Format[ObjectResource] = Json.format[ObjectResource] + implicit lazy val ownerReferenceFmt: Format[OwnerReference] = Json.format[OwnerReference] + implicit lazy val parameterFmt: Format[Parameter] = Json.format[Parameter] + implicit lazy val payloadFieldFmt: Format[PayloadField] = Json.format[PayloadField] + implicit lazy val persistentVolumeClaimFmt: Format[PersistentVolumeClaim] = Json.format[PersistentVolumeClaim] + implicit lazy val persistentVolumeClaimVolumeSourceFmt: Format[PersistentVolumeClaimVolumeSource] = Json.format[PersistentVolumeClaimVolumeSource] + implicit lazy val podAffinityFmt: Format[PodAffinity] = Json.format[PodAffinity] + implicit lazy val podAffinityTermFmt: Format[PodAffinityTerm] = Json.format[PodAffinityTerm] + implicit lazy val podAntiAffinityFmt: Format[PodAntiAffinity] = Json.format[PodAntiAffinity] + implicit lazy val podSecurityContextFmt: Format[PodSecurityContext] = Json.format[PodSecurityContext] + implicit lazy val preferredSchedulingTermFmt: Format[PreferredSchedulingTerm] = Json.format[PreferredSchedulingTerm] + implicit lazy val rateLimitFmt: Format[RateLimit] = Json.format[RateLimit] + implicit lazy val requestsFmt: Format[Requests] = Json.format[Requests] + implicit lazy val resourcesFmt: Format[Resources] = Json.format[Resources] + implicit lazy val secretKeySelectorFmt: Format[SecretKeySelector] = Json.format[SecretKeySelector] + implicit lazy val secureHeaderFmt: Format[SecureHeader] = Json.format[SecureHeader] + implicit lazy val seLinuxOptionsFmt: Format[SELinuxOptions] = Json.format[SELinuxOptions] + implicit lazy val serviceFmt: Format[Service] = Json.format[Service] + implicit lazy val servicePortFmt: Format[ServicePort] = Json.format[ServicePort] + implicit lazy val sysctlFmt: Format[Sysctl] = Json.format[Sysctl] + implicit lazy val templateFmt: Format[Template] = Json.format[Template] + implicit lazy val templateMetdataFmt: Format[TemplateMetadata] = Json.format[TemplateMetadata] + implicit lazy val timeFmt: Format[Time] = Json.format[Time] + implicit lazy val tlsConfigFmt: Format[TLSConfig] = Json.format[TLSConfig] + implicit lazy val valueFromFmt: Format[ValueFrom] = Json.format[ValueFrom] + implicit lazy val valueFromSourceFmt: Format[ValueFromSource] = Json.format[ValueFromSource] + implicit lazy val volumeFmt: Format[Volume] = Json.format[Volume] + implicit lazy val volumeMountFmt: Format[VolumeMount] = Json.format[VolumeMount] + implicit lazy val volumeSourceFmt: Format[VolumeSource] = Json.format[VolumeSource] + implicit lazy val weightedPodAffinityTermFmt: Format[WeightedPodAffinityTerm] = Json.format[WeightedPodAffinityTerm] + + + case class Affinity(nodeAffinity: Option[NodeAffinity] = None, + podAffinity: Option[PodAffinity] = None, + podAntiAffinity: Option[PodAntiAffinity] = None) + + case class AWSElasticBlockStoreVolumeSource(fsType: Option[String] = None, + partition: Option[Int] = None, + readOnly: Option[Boolean] = None, + volumeID: Option[String] = None) + + case class BasicAuth(username: SecretKeySelector, + password: SecretKeySelector) + + case class Backoff(duration: String, + factor: Int, + jitte: Int, + steps: 
Int) + + type Comparator = String + + case class ConfigMapKeySelector(key: Option[String] = None, + name: Option[String] = None, + optional: Option[Boolean] = None) + + case class ConfigMapVolumeSource(defaultMode: Option[Int] = None, + items: List[KeyToPath] = List()) + + + case class Container(args: List[String] = List(), + command: List[String] = List(), + env: List[EnvironmentVariable] = List(), + image: String, + imagePullPolicy: Option[String] = None, + mirrorVolumeMounts: Option[Boolean] = None, + name: String, + resources: Option[Resources] = None, + volumeMounts: List[VolumeMount] = List()) + + case class EnvironmentVariable(name: String, + value: String) + + case class FieldsV1(Raw: Option[String] = None) + + case class HostPathVolumeSource(path: Option[String] = None, + `type`: Option[String] = None) + + type JSONType = String + + case class K8SResourcePolicy(labels: Map[String, String], + backoff: Backoff, + errorOnBackoffTimeout: Boolean) + + case class KeyToPath(key: Option[String] = None, + mode: Option[Int] = None, + path: Option[String] = None) + + case class LabelSelector(matchExpressions: List[LabelSelectorRequirement] = List(), + matchLabels: Map[String, String] = Map()) + + case class LabelSelectorRequirement(key: Option[String] = None, + operator: Option[String] = None, + values: List[String] = List()) + + case class ManagedFieldsEntry(apiVersion: Option[String] = None, + fieldsType: Option[String] = None, + fieldsV1: Option[FieldsV1] = None, + manager: Option[String] = None, + action: Option[String] = None, + time: Option[Time] = None) + + case class MicroTime(Time: String) + + case class NFSVolumeSource(path: Option[String] = None, + readOnly: Option[Boolean] = None, + server: Option[String] = None) + + case class NodeAffinity(preferredDuringSchedulingIgnoredDuringExecution: List[PreferredSchedulingTerm] = List(), + requiredDuringSchedulingIgnoredDuringExecution: Option[NodeSelector] = None) + + case class NodeSelector(nodeSelectorTerms: List[NodeSelectorTerm] = List()) + + case class NodeSelectorRequirement(key: Option[String] = None, + operator: Option[String] = None, + values: List[String] = List()) + + case class NodeSelectorTerm(matchExpressions: List[NodeSelectorRequirement] = List(), + matchFields: List[NodeSelectorRequirement] = List()) + + case class NodeStatus(displayName: String, + id: String, + message: Option[String] = None, + name: String, + phase: Option[String] = None, + updateTime: Option[MicroTime] = None) + + case class ObjectMetadata(annotations: Map[String, String] = Map(), + clusterName: Option[String] = None, + creationTimestamp: Option[Time] = None, + deletionGracePeriodSeconds: Option[Int] = None, + deletionTimestamp: Option[Time] = None, + finalizers: List[String] = List(), + generateName: Option[String] = None, + generation: Option[String] = None, + labels: Map[String, String] = Map(), + managedFields: List[ManagedFieldsEntry] = List(), + name: Option[String] = None, + namespace: Option[String] = None, + ownerReferences: List[OwnerReference] = List(), + resourceVersion: Option[String] = None, + selfLink: Option[String] = None, + uid: Option[String] = None) + + case class ObjectResource(deployment: ObjectMetadata, + service: ObjectMetadata) + + case class OwnerReference(apiVersion: Option[String] = None, + blockOwnerDeletion: Option[Boolean] = None, + controller: Option[Boolean] = None, + kind: Option[String] = None, + name: Option[String] = None, + uid: Option[String] = None) + + case class Parameter(default: Option[String] = None, + 
globalName: Option[String] = None, + name: String, + value: Option[String] = None, + valueFrom: Option[ValueFrom] = None) + + case class PayloadField(path: String, + name: String) + + case class PodAffinity(preferredDuringSchedulingIgnoredDuringExecution: List[WeightedPodAffinityTerm] = List(), + requiredDuringSchedulingIgnoredDuringExecution: List[PodAffinityTerm] = List()) + + case class PodAffinityTerm(labelSelector: Option[LabelSelector] = None, + namespaces: List[String] = List(), + topologyKey: Option[String] = None) + + case class PodAntiAffinity(labelSelector: Option[LabelSelector] = None, + namespaces: List[String] = List(), + topologyKey: Option[String] = None) + + case class PersistentVolumeClaim(claimName: String) + + case class PersistentVolumeClaimVolumeSource(claimName: Option[String] = None, + readOnly: Option[Boolean] = None) + + case class PreferredSchedulingTerm(preference: Option[NodeSelectorTerm] = None, + weight: Option[Int] = None) + + type RateLimitUnit = String + + case class RateLimit(unit: RateLimitUnit, + requestsPerUnit: Int) + + case class Requests(cpu: Option[String] = None, + memory: Option[String] = None) + + case class Resources(requests: Option[Requests] = None) + + case class SecureHeader(name: String, + valueFrom: ValueFromSource) + + case class SecretKeySelector(key: Option[String] = None, + name: Option[String] = None, + optional: Option[Boolean] = None) + + case class Service(ports: List[ServicePort], + clusterIP: Option[String] = None) + + case class ServicePort(protocol: String, + port: Int, + targetPort: Int) + + case class StatusPolicy(allow: List[Int]) + + case class Template(metadata: Option[TemplateMetadata] = None, + serviceAccountName: Option[String] = None, + container: Option[Container] = None, + volumes: List[Volume] = List(), + securityContext: Option[PodSecurityContext] = None, + nodeSelector: Map[String, String] = Map(), + imagePullSecrets: List[LocalObjectReference] = List(), + priorityClassName: Option[String] = None, + priority: Option[Int] = None, + affinity: Option[Affinity] = None) + + case class TemplateMetadata(name: Option[String] = None, + labels: Map[String, String] = Map(), + volumes: List[Volume] = List()) + + case class TLSConfig(caCertSecret: SecretKeySelector, + clientCertSecret: SecretKeySelector, + clientKeySecret: SecretKeySelector) + + case class Time(nanos: Int, + seconds: Long) + + case class ValueFrom(path: String) + + case class ValueFromSource(configMapKeyRef: Option[ConfigMapKeySelector] = None, + secretKeyRef: Option[SecretKeySelector] = None) + + case class Volume(name: String, + persistentVolumeClaim: Option[PersistentVolumeClaim] = None, + secret: Map[String, String] = Map()) + + case class VolumeMount(mountPath: Option[String] = None, + mountPropagation: Option[String] = None, + name: String, + readOnly: Option[Boolean] = None, + subPath: Option[String] = None, + subPathExpr: Option[String] = None) + + case class VolumeSource(awsElasticBlockStore: Option[AWSElasticBlockStoreVolumeSource] = None, + configMap: Option[ConfigMapVolumeSource] = None, + hostPath: Option[HostPathVolumeSource] = None, + nfs: Option[NFSVolumeSource] = None, + persistentVolumeClaim: Option[PersistentVolumeClaimVolumeSource] = None) + + case class WeightedPodAffinityTerm(podAffinityTerm: Option[PodAffinityTerm] = None, + weight: Option[Int] = None) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/workflows/Workflow.scala b/jvm/src/main/scala/com/harana/modules/argo/workflows/Workflow.scala new 
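
// A minimal round-trip sketch, assuming the implicit Format instances from the
// com.harana.modules.argo package object above are on the classpath. The container
// name, image and values below are illustrative only, not part of the module.
import com.harana.modules.argo._
import play.api.libs.json.Json

object ArgoJsonRoundTrip extends App {
  val demoContainer = Container(
    command = List("echo", "hello"),
    env = List(EnvironmentVariable("GREETING", "hello")),
    image = "alpine:3.19",
    name = "demo"
  )

  val json   = Json.toJson(demoContainer)   // serialised via the macro-derived containerFmt
  val parsed = json.validate[Container]     // read back with the same Format instance
  assert(parsed.asOpt.contains(demoContainer))
  println(Json.prettyPrint(json))
}
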
file mode 100644 index 0000000..d51ad60 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/workflows/Workflow.scala @@ -0,0 +1,55 @@ +package com.harana.modules.argo.workflows + +import ai.x.play.json.Jsonx +import com.harana.modules.argo.{Affinity, NodeStatus, PersistentVolumeClaim, SecretKeySelector, Time} +import play.api.libs.json.{Format, Json} +import skuber.apiextensions.CustomResourceDefinition +import skuber.{CustomResource, ListResource, ResourceDefinition} + +object Workflow { + + type Workflow = CustomResource[Spec, Status] + type WorkflowList = ListResource[Workflow] + + implicit lazy val specFmt: Format[Spec] = Jsonx.formatCaseClass[Spec] + implicit lazy val statusFmt: Format[Status] = Json.format[Status] + implicit lazy val resourceDefinition: ResourceDefinition[Workflow] = ResourceDefinition[Workflow]("Workflow", "argoproj.io", "v1alpha1") + val crd = CustomResourceDefinition[Workflow] + + def apply(name: String, spec: Spec) = CustomResource[Spec, Status](spec).withName(name) + + case class Spec(activeDeadlineSeconds: Option[Int] = None, + affinity: Option[Affinity] = None, + arguments: Option[Arguments] = None, + automountServiceAccountToken: Option[Boolean] = None, + entrypoint: Option[String] = None, + hostNetwork: Option[Boolean] = None, + imagePullSecrets: List[SecretKeySelector] = List(), + nodeSelector: Option[String] = None, + onExit: Option[String] = None, + parallelism: Option[Int] = None, + podGC: Option[PodGC] = None, + podSpecPath: Option[String] = None, + priority: Option[Int] = None, + schedulerName: Option[String] = None, + serviceAccountName: Option[String] = None, + suspend: Option[Boolean] = None, + templates: List[Template] = List(), + tolerations: List[Toleration] = List(), + ttlSecondsAfterFinished: Option[Int] = None, + ttlStrategy: Option[TtlStrategy] = None, + volumeClaimTemplates: List[PersistentVolumeClaim] = List(), + volumes: List[Volume] = List()) + + case class Status(compressedNodes: String, + finishedAt: Time, + message: String, + nodes: NodeStatus, + offloadNodeStatusVersion: String, + outputs: Outputs, + persistentVolumeClaims: List[Volume] = List(), + phase: String, + startedAt: Time, + storedTemplates: Template) + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/workflows/WorkflowTemplate.scala b/jvm/src/main/scala/com/harana/modules/argo/workflows/WorkflowTemplate.scala new file mode 100644 index 0000000..6476f70 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/workflows/WorkflowTemplate.scala @@ -0,0 +1,53 @@ +package com.harana.modules.argo.workflows + +import ai.x.play.json.Jsonx +import com.harana.modules.argo.{Affinity, NodeStatus, PersistentVolumeClaim, SecretKeySelector, Time} +import play.api.libs.json.{Format, Json} +import skuber.apiextensions.CustomResourceDefinition +import skuber.{CustomResource, ResourceDefinition} + +object WorkflowTemplate { + + type WorkflowTemplate = CustomResource[Spec, Status] + + implicit lazy val specFmt: Format[Spec] = Jsonx.formatCaseClass[Spec] + implicit lazy val statusFmt: Format[Status] = Json.format[Status] + implicit lazy val resourceDefinition: ResourceDefinition[WorkflowTemplate] = ResourceDefinition[WorkflowTemplate]("WorkflowTemplate", "argoproj.io", "v1alpha1") + val crd = CustomResourceDefinition[WorkflowTemplate] + + def apply(name: String, spec: Spec) = CustomResource[Spec, Status](spec).withName(name) + + case class Spec(activeDeadlineSeconds: Option[Int] = None, + affinity: Option[Affinity] = None, + arguments: 
Option[Arguments] = None, + automountServiceAccountToken: Option[Boolean] = None, + entrypoint: Option[String] = None, + hostNetwork: Option[Boolean] = None, + imagePullSecrets: List[SecretKeySelector] = List(), + nodeSelector: Option[String] = None, + onExit: Option[String] = None, + parallelism: Option[Int] = None, + podGC: Option[PodGC] = None, + podSpecPath: Option[String] = None, + priority: Option[Int] = None, + schedulerName: Option[String] = None, + serviceAccountName: Option[String] = None, + suspend: Option[Boolean] = None, + templates: List[Template] = List(), + tolerations: List[Toleration] = List(), + ttlSecondsAfterFinished: Option[Int] = None, + ttlStrategy: Option[TtlStrategy] = None, + volumeClaimTemplates: List[PersistentVolumeClaim] = List(), + volumes: List[Volume] = List()) + + case class Status(compressedNodes: String, + finishedAt: Time, + message: String, + nodes: NodeStatus, + offloadNodeStatusVersion: String, + outputs: Outputs, + persistentVolumeClaims: List[Volume] = List(), + phase: String, + startedAt: Time, + storedTemplates: Template) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/workflows/package.scala b/jvm/src/main/scala/com/harana/modules/argo/workflows/package.scala new file mode 100644 index 0000000..9fd4271 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/workflows/package.scala @@ -0,0 +1,235 @@ +package com.harana.modules.argo + +import ai.x.play.json.{CamelToSnakeNameEncoder, Jsonx} +import play.api.libs.json.{Format, Json} + +package object workflows { + + implicit lazy val encoder: CamelToSnakeNameEncoder = CamelToSnakeNameEncoder() + + implicit lazy val argumentsFmt: Format[Arguments] = Json.format[Arguments] + implicit lazy val artifactoryFmt: Format[Artifactory] = Json.format[Artifactory] + implicit lazy val artifactoryArtifactFmt: Format[ArtifactoryArtifact] = Json.format[ArtifactoryArtifact] + implicit lazy val artifactoryAuthFmt: Format[ArtifactoryAuth] = Json.format[ArtifactoryAuth] + implicit lazy val artifactFmt: Format[Artifact] = Json.format[Artifact] + implicit lazy val artifactLocationFmt: Format[ArtifactLocation] = Json.format[ArtifactLocation] + implicit lazy val backoffFmt: Format[Backoff] = Json.format[Backoff] + implicit lazy val continueOnFmt: Format[ContinueOn] = Json.format[ContinueOn] + implicit lazy val dagFmt: Format[DAG] = Json.format[DAG] + implicit lazy val dagTaskFmt: Format[DAGTask] = Json.format[DAGTask] + implicit lazy val executorConfigFmt: Format[ExecutorConfig] = Json.format[ExecutorConfig] + implicit lazy val gitArtifactFmt: Format[GitArtifact] = Json.format[GitArtifact] + implicit lazy val hdfsArtifactFmt: Format[HDFSArtifact] = Json.format[HDFSArtifact] + implicit lazy val hdfsConfigFmt: Format[HDFSConfig] = Json.format[HDFSConfig] + implicit lazy val hdfsKrbConfigFmt: Format[HDFSKrbConfig] = Json.format[HDFSKrbConfig] + implicit lazy val httpArtifactFmt: Format[HTTPArtifact] = Json.format[HTTPArtifact] + implicit lazy val inputsFmt: Format[Inputs] = Json.format[Inputs] + implicit lazy val metadataFmt: Format[Metadata] = Json.format[Metadata] + implicit lazy val outputsFmt: Format[Outputs] = Json.format[Outputs] + implicit lazy val podGcFmt: Format[PodGC] = Json.format[PodGC] + implicit lazy val rawFmt: Format[Raw] = Json.format[Raw] + implicit lazy val rawArtifactFmt: Format[RawArtifact] = Json.format[RawArtifact] + implicit lazy val resourceFmt: Format[Resource] = Json.format[Resource] + implicit lazy val retryStrategyFmt: Format[RetryStrategy] = 
Json.format[RetryStrategy] + implicit lazy val s3Fmt: Format[S3] = Json.format[S3] + implicit lazy val s3ArtifactFmt: Format[S3Artifact] = Json.format[S3Artifact] + implicit lazy val s3BucketFmt: Format[S3Bucket] = Json.format[S3Bucket] + implicit lazy val scriptFmt: Format[Script] = Json.format[Script] + implicit lazy val sequenceFmt: Format[Sequence] = Json.format[Sequence] + implicit lazy val sidecarFmt: Format[Sidecar] = Json.format[Sidecar] + implicit lazy val stepFmt: Format[Step] = Json.format[Step] + implicit lazy val suspendFmt: Format[Suspend] = Json.format[Suspend] + implicit lazy val templateFmt: Format[Template] = Jsonx.formatCaseClass[Template] + implicit lazy val templateRefFmt: Format[TemplateRef] = Json.format[TemplateRef] + implicit lazy val tolerationFmt: Format[Toleration] = Json.format[Toleration] + implicit lazy val ttlStrategyFmt: Format[TtlStrategy] = Json.format[TtlStrategy] + implicit lazy val volumeFmt: Format[Volume] = Json.format[Volume] + + case class Arguments(artifacts: List[Artifact] = List(), + parameters: List[Parameter] = List()) + + case class Artifactory(url: Option[String] = None, + usernameSecret: Option[SecretKeySelector] = None, + passwordSecret: Option[SecretKeySelector] = None) + + case class ArtifactoryArtifact(artifactoryAuth: Option[ArtifactoryAuth] = None, + url: Option[String] = None) + + case class ArtifactoryAuth(passwordSecret: Option[SecretKeySelector] = None, + usernameSecret: Option[SecretKeySelector] = None) + + case class Artifact(name: String, + artifactory: Option[Artifactory] = None, + http: Map[String, String] = Map(), + path: Option[String] = None, + raw: Option[Raw] = None, + s3: Option[S3] = None, + mode: Option[String] = None) + + case class ArtifactLocation(archiveLogs: Option[Boolean] = None, + artifactory: Option[ArtifactoryArtifact] = None, + git: Option[GitArtifact] = None, + hdfs: Option[HDFSArtifact] = None, + http: Option[HTTPArtifact] = None, + raw: Option[RawArtifact] = None, + s3: Option[S3Artifact] = None) + + case class Backoff(duration: Option[String] = None, + factor: Option[Int] = None, + maxDuration: Option[String] = None) + + case class ContinueOn(error: Option[Boolean] = None, + failed: Option[Boolean] = None) + + case class DAG(failFast: Option[Boolean] = None, + target: Option[String] = None, + tasks: List[DAGTask] = List()) + + case class DAGTask(arguments: Option[Arguments] = None, + continueOn: Option[ContinueOn] = None, + dependencies: List[String] = List(), + name: Option[String] = None, + onExit: Option[String] = None, + template: Option[String] = None, + templateRef: Option[TemplateRef] = None, + when: Option[String] = None, + withItems: List[String] = List(), + withSequence: Option[Sequence] = None) + + case class ExecutorConfig(serviceAccountName: Option[String] = None) + + case class GitArtifact(depth: Option[Long] = None, + fetch: List[String] = List()) + + case class HDFSArtifact(force: Option[Boolean] = None, + hDFSConfig: Option[HDFSConfig] = None, + path: Option[String] = None) + + case class HDFSConfig(hDFSKrbConfig: Option[HDFSKrbConfig] = None, + hdfsUser: Option[String] = None) + + case class HDFSKrbConfig(krbCCacheSecret: Option[SecretKeySelector] = None, + krbConfigConfigMap: Option[ConfigMapKeySelector] = None, + krbKeytabSecret: Option[SecretKeySelector] = None, + krbRealm: Option[String] = None, + krbServicePrincipalName: Option[String] = None, + krbUsername: Option[String] = None) + + case class HTTPArtifact(url: Option[String] = None) + + case class Inputs(artifacts: 
List[Artifact] = List(), + parameters: List[Parameter] = List()) + + case class Metadata(annotations: Option[String] = None, + labels: Option[String] = None) + + case class Outputs(artifacts: List[Artifact] = List(), + parameters: List[Parameter] = List(), + result: Option[String] = None) + + case class PodGC(strategy: String) + + case class Raw(data: String) + + case class RawArtifact(data: Option[String] = None) + + case class Resource(action: String, + failureCondition: Option[String] = None, + manifest: Option[String] = None, + successCondition: Option[String] = None) + + case class RetryStrategy(backoff: Option[Backoff] = None, + limit: Int, + retryPolicy: String) + + case class S3(accessKeySecret: Option[SecretKeySelector] = None, + bucket: Option[String] = None, + endpoint: Option[String] = None, + insecure: Option[Boolean] = None, + key: Option[String] = None, + region: Option[String] = None, + roleARN: Option[String] = None, + secretKeySecret: Option[SecretKeySelector] = None) + + case class S3Artifact(key: Option[String] = None, + s3bucket: Option[S3Bucket] = None) + + case class S3Bucket(accessKeySecret: Option[SecretKeySelector] = None, + bucket: Option[String] = None, + endpoint: Option[String] = None, + insecure: Option[Boolean] = None, + region: Option[Boolean] = None, + roleARN: Option[String] = None, + secretKeySecret: Option[SecretKeySelector]) + + case class Script(image: String, + command: List[String] = List(), + source: String) + + case class Sidecar(name: String, + image: String) + + case class Sequence(count: Option[String] = None, + end: Option[String] = None, + format: Option[String] = None, + start: Option[String] = None) + + case class Step(arguments: Option[Arguments] = None, + continueOn: Option[ContinueOn] = None, + name: String, + onExit: Option[String] = None, + template: Option[String] = None, + templateRef: Option[String] = None, + when: Option[String] = None, + withItems: List[String] = List(), + withParam: Option[String] = None, + withSequence: Option[Sequence] = None) + + case class Suspend(duration: Option[String] = None) + + case class Template(archiveLocation: Option[ArtifactLocation] = None, + container: Option[Container] = None, + daemon: Option[Boolean] = None, + dag: Option[DAG] = None, + executor: Option[ExecutorConfig] = None, + initContainers: List[Container] = List(), + inputs: Option[Inputs] = None, + metadata: Option[Metadata] = None, + name: String, + nodeSelector: Option[String] = None, + outputs: Option[Outputs] = None, + parallelism: Option[Int] = None, + parameters: List[Parameter] = List(), + podSpecPath: Option[String] = None, + priority: Option[Int] = None, + priorityClassName: Option[String] = None, + resource: Option[Resource] = None, + retryStrategy: Option[RetryStrategy] = None, + schedulerName: Option[String] = None, + script: Option[Script] = None, + serviceAccountName: Option[String] = None, + sidecars: List[Container] = List(), + steps: List[Step] = List(), + suspend: Option[Suspend] = None, + template: Option[String] = None, + templateRef: Option[TemplateRef] = None, + tolerations: List[Toleration] = List(), + volumes: List[Volume] = List()) + + case class TemplateRef(name: Option[String] = None, + runtimeResolution: Option[Boolean] = None, + template: Option[String] = None) + + case class Toleration(effect: Option[String] = None, + key: Option[String] = None, + operator: Option[String] = None, + tolerationSeconds: Option[Int] = None, + value: Option[String] = None) + + case class TtlStrategy(secondsAfterCompletion: 
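
// A hedged sketch of assembling a single-step Workflow with the helpers in this module;
// the entrypoint, template and image names are made up, and actually submitting the
// resource with a skuber client is left out.
import com.harana.modules.argo.Container
import com.harana.modules.argo.workflows.{Template, Workflow}

object WorkflowSketch {
  val hello = Template(
    name = "hello",
    container = Some(Container(command = List("echo", "hello argo"), image = "alpine:3.19", name = "hello"))
  )

  val spec = Workflow.Spec(
    entrypoint = Some("hello"),
    templates = List(hello)
  )

  // A CustomResource[Spec, Status] named via Workflow.apply, ready to create through skuber.
  val workflow = Workflow("hello-workflow", spec)
}
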
Option[Int] = None, + secondsAfterSuccess: Option[Int] = None, + secondsAfterFailure: Option[Int] = None) + + case class Volume(name: Option[String] = None, + volumeSource: Option[VolumeSource] = None) +} diff --git a/jvm/src/main/scala/com/harana/modules/auth0/Auth0.scala b/jvm/src/main/scala/com/harana/modules/auth0/Auth0.scala new file mode 100644 index 0000000..b8bbd58 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/auth0/Auth0.scala @@ -0,0 +1,61 @@ +package com.harana.modules.auth0 + +import com.auth0.json.auth.{CreatedUser, TokenHolder, UserInfo} +import com.auth0.json.mgmt.roles.Role +import com.auth0.json.mgmt.users.User +import com.harana.modules.auth0.models.Auth0Error +import zio.IO +import zio.macros.accessible + +import java.net.URL + +@accessible +trait Auth0 { + + def authorizeUrl(redirectUri: String, + audience: Option[String] = None, + connection: Option[String] = None, + parameter: Option[(String, String)] = None, + responseType: Option[String] = None, + scope: Option[String] = None, + state: Option[String] = None): IO[Nothing, URL] + + def logoutUrl(returnToUrl: String, setClientId: Boolean, useFederated: Option[Boolean] = None): IO[Nothing, URL] + + def userInfo(accessToken: String): IO[Auth0Error, UserInfo] + + def resetPassword(email: String): IO[Auth0Error, Unit] + + def signUp(email: String, username: Option[String], password: String): IO[Auth0Error, CreatedUser] + + def login(emailOrUsername: String, password: String, realm: Option[String]): IO[Auth0Error, TokenHolder] + + def requestToken(audience: String): IO[Auth0Error, TokenHolder] + + def revokeToken(refreshToken: String): IO[Auth0Error, Unit] + + def renewAuth(refreshToken: String): IO[Auth0Error, TokenHolder] + + def exchangeCode(code: String, redirectUri: String): IO[Auth0Error, TokenHolder] + + def listByEmail(email: String): IO[Auth0Error, List[User]] + + def getUser(id: String): IO[Auth0Error, User] + + def createUser(user: User): IO[Auth0Error, User] + + def deleteUser(id: String): IO[Auth0Error, Unit] + + def updateUser(id: String, user: User): IO[Auth0Error, User] + + def getRole(id: String): IO[Auth0Error, Role] + + def createRole(role: Role): IO[Auth0Error, Role] + + def deleteRole(id: String): IO[Auth0Error, Unit] + + def updateRole(id: String, role: Role): IO[Auth0Error, Role] + + def assignUsersToRole(roleId: String, userIds: List[String]): IO[Auth0Error, Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/auth0/LiveAuth0.scala b/jvm/src/main/scala/com/harana/modules/auth0/LiveAuth0.scala new file mode 100644 index 0000000..b652fd7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/auth0/LiveAuth0.scala @@ -0,0 +1,205 @@ +package com.harana.modules.auth0 + +import com.auth0.client.auth.AuthAPI +import com.auth0.client.mgmt.ManagementAPI +import com.auth0.client.mgmt.filter.UserFilter +import com.auth0.exception.{APIException, Auth0Exception, RateLimitException} +import com.auth0.json.auth._ +import com.auth0.json.mgmt.roles.Role +import com.auth0.json.mgmt.users.User +import com.auth0.net.Request +import com.harana.modules.auth0.models.Auth0Error +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import zio.{IO, UIO, ZIO, ZLayer} + +import java.net.URL +import scala.jdk.CollectionConverters._ + +object LiveAuth0 { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] 
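
// A hedged usage sketch of the @accessible Auth0 service: LiveAuth0.layer (together with
// the Config/Logger/Micrometer layers it depends on, elided here) would be provided at
// the edge of the application. The credentials below are placeholders.
import com.harana.modules.auth0.Auth0
import com.harana.modules.auth0.models.Auth0Error
import zio.ZIO

object Auth0UsageSketch {
  val login: ZIO[Auth0, Auth0Error, Unit] =
    for {
      tokens <- Auth0.login("user@example.com", "not-a-real-password", realm = None)
      info   <- Auth0.userInfo(tokens.getAccessToken)
      _      <- ZIO.succeed(println(info.getValues))
    } yield ()

  // e.g. login.provide(LiveAuth0.layer, configLayer, loggerLayer, micrometerLayer)
}
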
+ } yield LiveAuth0(config, logger, micrometer) + } +} + +case class LiveAuth0(config: Config, logger: Logger, micrometer: Micrometer) extends Auth0 { + + private val authApi = for { + domain <- config.string("auth0.domain", "") + clientId <- config.secret("auth0-client-id") + clientSecret <- config.secret("auth0-client-secret") + } yield { + AuthAPI.newBuilder(domain, clientId, clientSecret).build() + } + + private val managementApi = for { + apiToken <- config.secret("auth0-api-token") + domain <- config.string("auth0.domain", "") + } yield { + ManagementAPI.newBuilder(domain, apiToken).build() + } + + def authorizeUrl(redirectUri: String, + audience: Option[String] = None, + connection: Option[String] = None, + parameter: Option[(String, String)] = None, + responseType: Option[String] = None, + scope: Option[String] = None, + state: Option[String] = None): UIO[URL] = + for { + a <- authApi + r <- ZIO.succeed { + var b = a.authorizeUrl(redirectUri) + if (audience.nonEmpty) b = b.withAudience(audience.get) + if (audience.nonEmpty) b = b.withAudience(audience.get) + if (connection.nonEmpty) b = b.withConnection(connection.get) + if (parameter.nonEmpty) b = b.withParameter(parameter.get._1, parameter.get._2) + if (responseType.nonEmpty) b = b.withResponseType(responseType.get) + if (scope.nonEmpty) b = b.withScope(scope.get) + if (state.nonEmpty) b = b.withState(state.get) + new URL(b.build()) + } + } yield r + + def logoutUrl(returnToUrl: String, setClientId: Boolean, useFederated: Option[Boolean] = None): UIO[URL] = + for { + a <- authApi + r <- ZIO.succeed { + var b = a.logoutUrl(returnToUrl, setClientId) + if (useFederated.nonEmpty) b = b.useFederated(useFederated.get) + new URL(b.build()) + } + } yield r + + def userInfo(accessToken: String): IO[Auth0Error, UserInfo] = + for { + a <- authApi + r <- execute(a.userInfo(accessToken)) + } yield r + + def resetPassword(email: String): IO[Auth0Error, Unit] = + for { + a <- authApi + _ <- execute(a.resetPassword(email, "Username-Password-Authentication")) + r <- ZIO.unit + } yield r + + def signUp(email: String, username: Option[String], password: String): IO[Auth0Error, CreatedUser] = + for { + a <- authApi + r <- execute( + if (username.nonEmpty) a.signUp(email, username.get, password, "Username-Password-Authentication") + else a.signUp(email, password, "Username-Password-Authentication") + ) + } yield r + + def login(emailOrUsername: String, password: String, realm: Option[String]): IO[Auth0Error, TokenHolder] = + for { + a <- authApi + r <- execute( + if (realm.nonEmpty) a.login(emailOrUsername, password, realm.get) + else a.login(emailOrUsername, password) + ) + } yield r + + def requestToken(audience: String): IO[Auth0Error, TokenHolder] = + for { + a <- authApi + r <- execute(a.requestToken(audience)) + } yield r + + def revokeToken(refreshToken: String): IO[Auth0Error, Unit] = + for { + a <- authApi + _ <- execute(a.revokeToken(refreshToken)) + r <- ZIO.unit + } yield r + + def renewAuth(refreshToken: String): IO[Auth0Error, TokenHolder] = + for { + a <- authApi + r <- execute(a.renewAuth(refreshToken)) + } yield r + + def exchangeCode(code: String, redirectUri: String): IO[Auth0Error, TokenHolder] = + for { + a <- authApi + r <- execute(a.exchangeCode(code, redirectUri)) + } yield r + + def listByEmail(email: String): IO[Auth0Error, List[User]] = + for { + m <- managementApi + r <- execute(m.users.listByEmail(email, new UserFilter)).map(_.asScala.toList) + } yield r + + def getUser(id: String): IO[Auth0Error, User] = + for { + m <- 
managementApi + r <- execute(m.users.get(id, new UserFilter())) + } yield r + + def createUser(user: User): IO[Auth0Error, User] = + for { + m <- managementApi + r <- execute(m.users.create(user)) + } yield r + + def deleteUser(id: String): IO[Auth0Error, Unit] = + for { + m <- managementApi + _ <- execute(m.users.delete(id)) + r <- ZIO.unit + } yield r + + def updateUser(id: String, user: User): IO[Auth0Error, User] = + for { + m <- managementApi + r <- execute(m.users.update(id, user)) + } yield r + + def getRole(id: String): IO[Auth0Error, Role] = + for { + m <- managementApi + r <- execute(m.roles.get(id)) + } yield r + + def createRole(role: Role): IO[Auth0Error, Role] = + for { + m <- managementApi + r <- execute(m.roles.create(role)) + } yield r + + def deleteRole(id: String): IO[Auth0Error, Unit] = + for { + m <- managementApi + _ <- execute(m.roles.delete(id)) + r <- ZIO.unit + } yield r + + def updateRole(id: String, role: Role): IO[Auth0Error, Role] = + for { + m <- managementApi + r <- execute(m.roles.update(id, role)) + } yield r + + def assignUsersToRole(roleId: String, userIds: List[String]): IO[Auth0Error, Unit] = + for { + m <- managementApi + _ <- execute(m.roles.assignUsers(roleId, userIds.asJava)) + r <- ZIO.unit + } yield r + + private def execute[T](request: Request[T]): IO[Auth0Error, T] = + ZIO.attempt(request).mapBoth({ + case e: RateLimitException => Auth0Error.RateLimit(e) + case e: APIException => Auth0Error.Api(e) + case e: Auth0Exception => Auth0Error.Request(e) + }, _.execute().getBody) + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/auth0/models/Auth0Error.scala b/jvm/src/main/scala/com/harana/modules/auth0/models/Auth0Error.scala new file mode 100644 index 0000000..5b42343 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/auth0/models/Auth0Error.scala @@ -0,0 +1,8 @@ +package com.harana.modules.auth0.models + +sealed trait Auth0Error +object Auth0Error { + case class Api(e: Exception) extends Auth0Error + case class RateLimit(e: Exception) extends Auth0Error + case class Request(e: Exception) extends Auth0Error +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/aws/AWS.scala b/jvm/src/main/scala/com/harana/modules/aws/AWS.scala new file mode 100644 index 0000000..1e3eb65 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/aws/AWS.scala @@ -0,0 +1,36 @@ +package com.harana.modules.aws + +import awscala.iam.AccessKey +import awscala.s3.S3ObjectSummary +import com.amazonaws.services.simpleemail.model.{Message, Template} +import zio.Task +import zio.macros.accessible + +import java.io.InputStream + +@accessible +trait AWS { + + def iamCreateS3User(name: String, bucket: String, prefix: String): Task[AccessKey] + def iamDeleteUser(name: String): Task[Unit] + + def s3CreateBucket(name: String): Task[Unit] + def s3List(bucket: String, prefix: Option[String]): Task[List[S3ObjectSummary]] + def s3ListAsStream(bucket: String, prefix: Option[String]): Task[Stream[Either[String, S3ObjectSummary]]] + def s3ListTags(bucket: String, at: String): Task[Map[String, String]] + def s3CopyFile(fromBucket: String, from: String, toBucket: Option[String], to: String): Task[Unit] + def s3CopyFolder(fromBucket: String, from: String, toBucket: Option[String], to: String): Task[Unit] + def s3Move(fromBucket: String, from: String, toBucket: Option[String], to: String): Task[Unit] + def s3Get(bucket: String, at: String): Task[InputStream] + def s3Put(bucket: String, at: String, inputStream: InputStream, 
contentLength: Long): Task[Unit] + def s3Rename(bucket: String, from: String, to: String): Task[Unit] + def s3Delete(bucket: String, at: String): Task[Unit] + def s3Tag(bucket: String, at: String, tags: Map[String, String]): Task[Unit] + + def sesCreateTemplate(template: Template): Task[Unit] + def sesDeleteTemplate(name: String): Task[Unit] + def sesSendEmail(message: Message, to: List[String], cc: List[String], bcc: List[String], sender: String, replyTo: List[String] = List()): Task[Unit] + def sesSendTemplatedEmail(template: String, templateValues: Map[String, String], to: List[String], cc: List[String], bcc: List[String], sender: String, replyTo: List[String] = List()): Task[Unit] + def sesSendBulkTemplatedEmail(template: String, toWithTemplateValues: List[(String, Map[String, String])], sender: String, replyTo: List[String] = List()): Task[Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/aws/LiveAWS.scala b/jvm/src/main/scala/com/harana/modules/aws/LiveAWS.scala new file mode 100644 index 0000000..2aec2b8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/aws/LiveAWS.scala @@ -0,0 +1,290 @@ +package com.harana.modules.aws + +import awscala._ +import awscala.iam.{AccessKey, IAM} +import awscala.s3.{Bucket, S3, S3ObjectSummary} +import com.amazonaws.auth._ +import com.amazonaws.services.identitymanagement.model.DeleteUserPolicyRequest +import com.amazonaws.services.s3.model._ +import com.amazonaws.services.s3.transfer.TransferManagerBuilder +import com.amazonaws.services.simpleemail.model._ +import com.amazonaws.services.simpleemail.{AmazonSimpleEmailServiceAsync, AmazonSimpleEmailServiceAsyncClient} +import com.harana.modules.aws.LiveAWS._ +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import io.circe.syntax._ +import zio.{Task, ZIO, ZLayer} + +import java.io.InputStream +import java.util.concurrent.atomic.AtomicReference +import scala.jdk.CollectionConverters._ + +object LiveAWS { + val credentialsProviderRef = new AtomicReference[Option[AWSCredentialsProvider]](None) + val iamRef = new AtomicReference[Option[IAM]](None) + val s3Ref = new AtomicReference[Option[S3]](None) + val sesRef = new AtomicReference[Option[AmazonSimpleEmailServiceAsync]](None) + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveAWS(config, logger, micrometer) + } +} + +case class LiveAWS(config: Config, logger: Logger, micrometer: Micrometer) extends AWS { + + private def credentialsProvider = + for { + provider <- if (credentialsProviderRef.get.nonEmpty) ZIO.attempt(credentialsProviderRef.get.get) else + for { + accessId <- config.secret("aws-access-id") + secretKey <- config.secret("aws-secret-key") + credentialsFile <- config.optString("aws.credentialsFile") + useCredentialsFile <- config.boolean("aws.useCredentialsFile", default = false) + useEnvironmentVariables <- config.boolean("aws.useEnvironmentVariables", default = false) + useInstanceProfile <- config.boolean("aws.useInstanceProfile", default = false) + profile <- config.optString("aws.profile") + provider = (accessId, secretKey, credentialsFile, useCredentialsFile, useEnvironmentVariables, useInstanceProfile, profile) match { + case (_, _, _, _, true, _, _) => new EnvironmentVariableCredentialsProvider() + case (_, _, _, _, _, true, _) => InstanceProfileCredentialsProvider.getInstance() + case (a, s, _, _, 
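
// A hedged sketch of driving the AWS module through its @accessible accessors; the bucket,
// prefix and key names are placeholders, and LiveAWS.layer plus its Config/Logger/Micrometer
// dependencies must be provided before this can run.
import com.harana.modules.aws.AWS
import zio.{RIO, ZIO}

object S3HousekeepingSketch {
  val program: RIO[AWS, Unit] =
    for {
      summaries <- AWS.s3List("my-bucket", prefix = Some("incoming/"))
      _         <- ZIO.succeed(println(s"${summaries.size} objects under incoming/"))
      _         <- AWS.s3CopyFolder("my-bucket", "incoming", toBucket = None, to = "archive")
      _         <- AWS.s3Tag("my-bucket", "archive/report.csv", Map("status" -> "archived"))
    } yield ()
}
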
_, _, _) => new AWSStaticCredentialsProvider(new BasicAWSCredentials(a, s)) + } + } yield provider + _ = credentialsProviderRef.set(Some(provider)) + } yield provider + + + private def iamClient = + for { + client <- if (iamRef.get.nonEmpty) ZIO.attempt(iamRef.get.get) else + for { + creds <- credentialsProvider + iam <- ZIO.attempt(IAM(creds)) + } yield iam + _ = iamRef.set(Some(client)) + } yield client + + + private def s3Client = + for { + client <- if (s3Ref.get.nonEmpty) ZIO.attempt(s3Ref.get.get) else + for { + creds <- credentialsProvider + region <- config.secret("aws-region") + s3 <- ZIO.attempt(S3(creds)(awscala.Region(region))) + } yield s3 + _ = s3Ref.set(Some(client)) + } yield client + + + private def sesClient = + for { + client <- if (sesRef.get.nonEmpty) ZIO.attempt(sesRef.get.get) else + for { + creds <- credentialsProvider + region <- config.secret("aws-region") + client <- ZIO.succeed(AmazonSimpleEmailServiceAsyncClient.asyncBuilder().withCredentials(creds).withRegion(region).build()) + } yield client + _ = sesRef.set(Some(client)) + } yield client + + + private def s3Bucket(bucket: String): Task[Bucket] = + for { + client <- s3Client + bucket <- ZIO.fromOption(client.bucket(bucket)).orElseFail(new Throwable("No available bucket")) + } yield bucket + + + def iamCreateS3User(name: String, bucket: String, prefix: String): Task[AccessKey] = + for { + client <- iamClient + user <- ZIO.attempt(client.createUser(name)) + s3arn = s"arn:aws:s3:::$bucket/$prefix" + policy = Policy(Seq(Statement(Effect.Allow, Seq(Action("s3:*")), Seq(Resource(s3arn))))) + _ = user.putPolicy(s"$name-s3", policy)(client) + accessKey = user.createAccessKey()(client) + } yield accessKey + + + def iamDeleteUser(name: String): Task[Unit] = + for { + client <- iamClient + user <- ZIO.attempt(client.user(name)) + _ <- ZIO.attempt(client.deleteUserPolicy(new DeleteUserPolicyRequest().withUserName(name).withPolicyName(s"$name-s3"))) + _ <- ZIO.foreachDiscard(user)(u => ZIO.attempt(client.delete(u))) + } yield () + + + def s3CreateBucket(name: String): Task[Unit] = + for { + client <- s3Client + _ <- ZIO.attempt(client.createBucket(name)) + } yield () + + + def s3List(bucket: String, prefix: Option[String]): Task[List[S3ObjectSummary]] = + for { + client <- s3Client + bucket <- s3Bucket(bucket) + summaries <- ZIO.attempt(client.objectSummaries(bucket, prefix.getOrElse("")).toList) + } yield summaries + + + def s3ListAsStream(bucket: String, prefix: Option[String]): Task[Stream[Either[String, S3ObjectSummary]]] = + for { + client <- s3Client + bucket <- s3Bucket(bucket) + summaries <- ZIO.attempt(client.ls(bucket, prefix.getOrElse(""))) + } yield summaries + + + def s3ListTags(bucket: String, at: String): Task[Map[String, String]] = + for { + client <- s3Client + request = new GetObjectTaggingRequest(bucket, at) + tagging <- ZIO.attempt(client.getObjectTagging(request)) + tags = tagging.getTagSet.asScala.map(t => t.getKey -> t.getValue).toMap + } yield tags + + + def s3CopyFile(fromBucket: String, from: String, toBucket: Option[String], to: String): Task[Unit] = + for { + client <- s3Client + _ <- ZIO.attempt(client.copyObject(fromBucket, from, toBucket.getOrElse(fromBucket), to)) + } yield () + + + def s3CopyFolder(fromBucket: String, from: String, toBucket: Option[String], to: String): Task[Unit] = + for { + client <- s3Client + manager <- ZIO.attempt { + val tm = TransferManagerBuilder.standard + tm.setS3Client(client) + tm.build + } + fromFiles <- ZIO.attempt(client.listObjects(fromBucket, 
s"$from/").getObjectSummaries.asScala.toList) + _ <- ZIO.foreachParDiscard(fromFiles) { file => + for { + filename <- ZIO.succeed(file.getKey.replace(s"$from/", "")) + _ <- logger.debug(s"Copying $fromBucket/${file.getKey} to $toBucket/$to/$filename") + _ <- ZIO.attempt(manager.copy(fromBucket, file.getKey, toBucket.getOrElse(fromBucket), s"$to/$filename").waitForCompletion()).when(!filename.isEmpty) + } yield () + } + } yield () + + + def s3Move(fromBucket: String, from: String, toBucket: Option[String], to: String): Task[Unit] = + for { + _ <- s3CopyFile(fromBucket, from, toBucket, to) + _ <- s3Delete(fromBucket, from) + } yield () + + + def s3Rename(bucket: String, from: String, to: String): Task[Unit] = + s3Move(bucket, from, None, to) + + + def s3Get(bucket: String, at: String): Task[InputStream] = + for { + client <- s3Client + bucket <- s3Bucket(bucket) + inputStream <- ZIO.attempt(client.getObject(bucket, at).get.content) + } yield inputStream + + + def s3Put(bucket: String, at: String, inputStream: InputStream, contentLength: Long): Task[Unit] = + for { + client <- s3Client + metadata = new ObjectMetadata() + _ = metadata.setContentLength(contentLength) + _ <- ZIO.attempt(client.putObject(bucket, at, inputStream, metadata)) + } yield () + + + def s3Delete(bucket: String, at: String): Task[Unit] = + for { + client <- s3Client + _ <- ZIO.attempt(client.deleteObject(bucket, at)) + } yield () + + + def s3Tag(bucket: String, at: String, tags: Map[String, String]): Task[Unit] = + for { + client <- s3Client + s3Tags = tags.map { case (k, v) => new Tag(k, v) } + request = new SetObjectTaggingRequest(bucket, at, new ObjectTagging(s3Tags.toList.asJava)) + _ <- ZIO.attempt(client.setObjectTagging(request)) + } yield () + + + def sesCreateTemplate(template: Template): Task[Unit] = + for { + client <- sesClient + request = new CreateTemplateRequest().withTemplate(template) + - <- ZIO.fromFutureJava(client.createTemplateAsync(request)) + } yield () + + + def sesDeleteTemplate(name: String): Task[Unit] = + for { + client <- sesClient + request = new DeleteTemplateRequest().withTemplateName(name) + - <- ZIO.fromFutureJava(client.deleteTemplateAsync(request)) + } yield () + + + def sesSendEmail(message: Message, to: List[String], cc: List[String], bcc: List[String], sender: String, replyTo: List[String] = List()): Task[Unit] = + for { + client <- sesClient + destination = new Destination().withBccAddresses(bcc.asJava).withCcAddresses(cc.asJava).withToAddresses(to.asJava) + configurationSet <- config.string("aws.ses.configurationSet", "default") + request = new SendEmailRequest() + .withConfigurationSetName(configurationSet) + .withSource(sender) + .withDestination(destination) + .withMessage(message) + .withReplyToAddresses(replyTo.asJava) + - <- ZIO.fromFutureJava(client.sendEmailAsync(request)) + } yield () + + + def sesSendTemplatedEmail(template: String, templateValues: Map[String, String], to: List[String], cc: List[String], bcc: List[String], sender: String, replyTo: List[String] = List()): Task[Unit] = + for { + client <- sesClient + destination = new Destination().withBccAddresses(bcc.asJava).withCcAddresses(cc.asJava).withToAddresses(to.asJava) + configurationSet <- config.string("aws.ses.configurationSet", "default") + request = new SendTemplatedEmailRequest() + .withConfigurationSetName(configurationSet) + .withSource(sender) + .withDestination(destination) + .withReplyToAddresses(replyTo.asJava) + .withTemplate(template) + .withTemplateData(templateValues.asJson.noSpaces) + - <- 
ZIO.fromFutureJava(client.sendTemplatedEmailAsync(request)) + } yield () + + + def sesSendBulkTemplatedEmail(template: String, toWithTemplateValues: List[(String, Map[String, String])], sender: String, replyTo: List[String] = List()): Task[Unit] = + for { + client <- sesClient + configurationSet <- config.string("aws.ses.configurationSet", "default") + destinations = toWithTemplateValues.map(t => + new BulkEmailDestination() + .withDestination(new Destination().withToAddresses(List(t._1).asJava)) + .withReplacementTemplateData(t._2.asJson.noSpaces) + ).asJavaCollection + request = new SendBulkTemplatedEmailRequest() + .withConfigurationSetName(configurationSet) + .withSource(sender) + .withDestinations(destinations) + .withDefaultTemplateData("{}") + .withTemplate(template) + .withReplyToAddresses(replyTo.asJava) + - <- ZIO.fromFutureJava(client.sendBulkTemplatedEmailAsync(request)) + } yield () +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/aws_s3/AwsS3.scala b/jvm/src/main/scala/com/harana/modules/aws_s3/AwsS3.scala new file mode 100644 index 0000000..6a0d14f --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/aws_s3/AwsS3.scala @@ -0,0 +1,114 @@ +package com.harana.modules.aws_s3 + +import io.vertx.core.buffer.Buffer +import io.vertx.ext.reactivestreams.{ReactiveReadStream, ReactiveWriteStream} +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider +import software.amazon.awssdk.regions.Region +import software.amazon.awssdk.services.s3.S3AsyncClient +import software.amazon.awssdk.services.s3.model._ +import zio.Task +import zio.macros.accessible + +import java.time.Instant + +@accessible +trait AwsS3 { + + def newClient(credentials: AwsCredentialsProvider, + region: Option[Region] = None, + endpoint: Option[String] = None, + targetThroughput: Option[Double] = None): Task[S3AsyncClient] + + def createBucket(client: S3AsyncClient, bucket: String): Task[Unit] + + def deleteBucket(client: S3AsyncClient, bucket: String): Task[Unit] + + def listBuckets(client: S3AsyncClient): Task[List[Bucket]] + + def bucketExists(client: S3AsyncClient, bucket: String): Task[Boolean] + + def getBucketPolicy(client: S3AsyncClient, bucket: String): Task[String] + + def getBucketAcl(client: S3AsyncClient, bucket: String): Task[GetBucketAclResponse] + + def putBucketAcl(client: S3AsyncClient, bucket: String, acl: BucketCannedACL): Task[Unit] + + def listObjects(client: S3AsyncClient, bucket: String, prefix: Option[String] = None): Task[ListObjectsV2Response] + + def deleteObject(client: S3AsyncClient, bucket: String, key: String): Task[Unit] + + def deleteObjects(client: S3AsyncClient, bucket: String, identifiers: List[ObjectIdentifier]): Task[Unit] + + def getObject(client: S3AsyncClient, + bucket: String, + key: String, + ifMatch: Option[String] = None, + ifNoneMatch: Option[String] = None, + ifModifiedSince: Option[Instant] = None, + ifUnmodifiedSince: Option[Instant] = None, + range: Option[String] = None): Task[(GetObjectResponse, ReactiveReadStream[Buffer])] + + def putObject(client: S3AsyncClient, + bucket: String, + key: String, + writeStream: ReactiveWriteStream[Buffer], + acl: ObjectCannedACL, + contentLength: Option[Long] = None, + contentMD5: Option[String] = None, + storageClass: Option[String] = None, + tags: Map[String, String] = Map()): Task[String] + + def copyObject(client: S3AsyncClient, + sourceBucket: String, + sourceKey: String, + destinationBucket: String, + destinationKey: String, + sourceIfMatch: Option[String] = None, + 
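
// A hedged sketch of the AwsS3 module in use: build an S3AsyncClient with newClient and
// list a prefix. The region and bucket are placeholders, and LiveAwsS3.layer plus its
// dependencies must be provided before running.
import com.harana.modules.aws_s3.AwsS3
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider
import software.amazon.awssdk.regions.Region
import zio.{RIO, ZIO}

object AwsS3ListingSketch {
  val program: RIO[AwsS3, Unit] =
    for {
      client  <- AwsS3.newClient(DefaultCredentialsProvider.create(), region = Some(Region.US_EAST_1))
      exists  <- AwsS3.bucketExists(client, "my-bucket")
      _       <- AwsS3.createBucket(client, "my-bucket").unless(exists)
      listing <- AwsS3.listObjects(client, "my-bucket", prefix = Some("reports/"))
      _       <- ZIO.succeed(println(s"${listing.keyCount()} objects under reports/"))
    } yield ()
}
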
sourceIfNoneMatch: Option[String] = None, + sourceIfModifiedSince: Option[Instant] = None, + sourceIfUnmodifiedSince: Option[Instant] = None): Task[CopyObjectResult] + + def getObjectAttributes(client: S3AsyncClient, bucket: String, key: String): Task[GetObjectAttributesResponse] + + def getObjectTagging(client: S3AsyncClient, bucket: String, key: String): Task[GetObjectTaggingResponse] + + def getObjectAcl(client: S3AsyncClient, bucket: String, key: String): Task[GetObjectAclResponse] + + def putObjectAcl(client: S3AsyncClient, bucket: String, key: String, acl: ObjectCannedACL): Task[Unit] + + def putObjectTagging(client: S3AsyncClient, bucket: String, key: String, tag: Map[String, String]): Task[Unit] + + def uploadPartCopy(client: S3AsyncClient, + sourceBucket: String, + sourceKey: String, + destinationBucket: String, + destinationKey: String, + uploadId: String, + partNumber: Int, + copySourceIfMatch: Option[String], + copySourceIfNoneMatch: Option[String], + copySourceIfModifiedSince: Option[Instant], + copySourceIfUnmodifiedSince: Option[Instant], + copySourceRange: Option[String]): Task[CopyPartResult] + + def uploadPart(client: S3AsyncClient, + bucket: String, + key: String, + uploadId: String, + partNumber: Int, + writeStream: ReactiveWriteStream[Buffer], + contentLength: Option[Long] = None): Task[String] + + def listParts(client: S3AsyncClient, bucket: String, key: String, uploadId: String): Task[List[Part]] + + def listMultipartUploads(client: S3AsyncClient, bucket: String, prefix: Option[String] = None): Task[List[MultipartUpload]] + + def createMultipartUpload(client: S3AsyncClient, bucket: String, key: String, cannedACL: ObjectCannedACL): Task[String] + + def abortMultipartUpload(client: S3AsyncClient, bucket: String, key: String, uploadId: String): Task[Unit] + + def completeMultipartUpload(client: S3AsyncClient, bucket: String, key: String, uploadId: String): Task[String] + + def presignedUrl(bucketName: String, key: String, expirationMinutes: Int): Task[String] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/aws_s3/LiveAwsS3.scala b/jvm/src/main/scala/com/harana/modules/aws_s3/LiveAwsS3.scala new file mode 100644 index 0000000..eec7508 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/aws_s3/LiveAwsS3.scala @@ -0,0 +1,267 @@ +package com.harana.modules.aws_s3 + +import com.amazonaws.services.s3.internal.ServiceUtils +import com.harana.modules.aws_s3.LiveAwsS3.presigner +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import io.vertx.core.buffer.Buffer +import io.vertx.ext.reactivestreams.{ReactiveReadStream, ReactiveWriteStream} +import org.reactivestreams.{Subscriber, Subscription} +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider +import software.amazon.awssdk.core.async.{AsyncRequestBody, AsyncResponseTransformer, SdkPublisher} +import software.amazon.awssdk.regions.Region +import software.amazon.awssdk.services.s3.S3AsyncClient +import software.amazon.awssdk.services.s3.model._ +import software.amazon.awssdk.services.s3.presigner.S3Presigner +import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest +import zio.{Task, ZIO, ZLayer} + +import java.net.URI +import java.nio.ByteBuffer +import java.time.{Duration, Instant} +import java.util.Optional +import java.util.concurrent.CompletableFuture +import scala.jdk.CollectionConverters._ + +object LiveAwsS3 { + val presigner = 
S3Presigner.create() + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveAwsS3(config, logger, micrometer) + } +} + +case class LiveAwsS3(config: Config, logger: Logger, micrometer: Micrometer) extends AwsS3 { + + def newClient(credentials: AwsCredentialsProvider, + region: Option[Region] = None, + endpoint: Option[String] = None, + targetThroughput: Option[Double] = None): Task[S3AsyncClient] = + for { + defaultRegion <- config.string("aws.defaultRegion") + clientBuilder = S3AsyncClient.crtBuilder() + .credentialsProvider(credentials) + .region(region.getOrElse(Region.of(defaultRegion))) + .targetThroughputInGbps(java.lang.Double.valueOf(targetThroughput.getOrElse(40.0))) + .minimumPartSizeInBytes(8 * 1024 * 1024) + client = if (endpoint.nonEmpty) clientBuilder.endpointOverride(URI.create(endpoint.get)).build() else clientBuilder.build() + } yield client + + def createBucket(client: S3AsyncClient, bucket: String) = + ZIO.fromCompletableFuture(client.createBucket(CreateBucketRequest.builder().bucket(bucket).build())).unit + + def deleteBucket(client: S3AsyncClient, bucket: String) = + ZIO.fromCompletableFuture(client.deleteBucket(DeleteBucketRequest.builder().bucket(bucket).build())).unit + + def listBuckets(client: S3AsyncClient) = + ZIO.fromCompletableFuture(client.listBuckets(ListBucketsRequest.builder().build())).map(_.buckets().asScala.toList) + + def bucketExists(client: S3AsyncClient, bucket: String) = + listBuckets(client).map(b => b.map(_.name()).contains(bucket)) + + def getBucketPolicy(client: S3AsyncClient, bucket: String) = + ZIO.fromCompletableFuture(client.getBucketPolicy(GetBucketPolicyRequest.builder().bucket(bucket).build())).map(_.policy()) + + def getBucketAcl(client: S3AsyncClient, bucket: String) = + ZIO.fromCompletableFuture(client.getBucketAcl(GetBucketAclRequest.builder().bucket(bucket).build())) + + def putBucketAcl(client: S3AsyncClient, bucket: String, acl: BucketCannedACL) = + ZIO.fromCompletableFuture(client.putBucketAcl(PutBucketAclRequest.builder().bucket(bucket).acl(acl).build())).unit + + def listObjects(client: S3AsyncClient, bucket: String, prefix: Option[String] = None) = { + var builder = ListObjectsV2Request.builder().bucket(bucket) + builder = if (prefix.nonEmpty) builder.prefix(prefix.get) else builder + ZIO.fromCompletableFuture(client.listObjectsV2(builder.build())) + } + + def deleteObject(client: S3AsyncClient, bucket: String, key: String) = + ZIO.fromCompletableFuture(client.deleteObject(DeleteObjectRequest.builder().bucket(bucket).key(key).build())).unit + + def deleteObjects(client: S3AsyncClient, bucket: String, identifiers: List[ObjectIdentifier]) = + ZIO.fromCompletableFuture( + client.deleteObjects(DeleteObjectsRequest.builder().bucket(bucket) + .delete(Delete.builder().objects(identifiers.asJava).build()).build()) + ).unit + + def getObject(client: S3AsyncClient, + bucket: String, + key: String, + ifMatch: Option[String] = None, + ifNoneMatch: Option[String] = None, + ifModifiedSince: Option[Instant] = None, + ifUnmodifiedSince: Option[Instant] = None, + range: Option[String] = None) = { + + val readStream = ReactiveReadStream.readStream[Buffer] + var response = Option.empty[GetObjectResponse] + + val builder = GetObjectRequest.builder() + .bucket(bucket) + .key(key) + + if (ifMatch.nonEmpty) builder.ifMatch(ifMatch.get) + if (ifNoneMatch.nonEmpty) builder.ifNoneMatch(ifNoneMatch.get) + if (ifModifiedSince.nonEmpty) 
builder.ifModifiedSince(ifModifiedSince.get) + if (ifUnmodifiedSince.nonEmpty) builder.ifUnmodifiedSince(ifUnmodifiedSince.get) + if (range.nonEmpty) builder.range(range.get) + + ZIO.fromCompletableFuture(client.getObject(builder.build(), new AsyncResponseTransformer[GetObjectResponse, Unit] { + override def onStream(publisher: SdkPublisher[ByteBuffer]) = + publisher.subscribe(new Subscriber[ByteBuffer] { + override def onSubscribe(sub: Subscription) = readStream.onSubscribe(sub) + override def onNext(t: ByteBuffer) = readStream.onNext(Buffer.buffer(t.array())) + override def onError(t: Throwable) = readStream.onError(t) + override def onComplete() = readStream.onComplete() + }) + + override def prepare() = new CompletableFuture[Unit] {} + override def onResponse(r: GetObjectResponse) = response = Some(r) + override def exceptionOccurred(error: Throwable) = readStream.onError(error) + })).as((response.get, readStream)) + } + + def getObjectAttributes(client: S3AsyncClient, bucket: String, key: String) = + ZIO.fromCompletableFuture(client.getObjectAttributes(GetObjectAttributesRequest.builder().bucket(bucket).key(key).build())) + + def getObjectTagging(client: S3AsyncClient, bucket: String, key: String) = + ZIO.fromCompletableFuture(client.getObjectTagging(GetObjectTaggingRequest.builder().bucket(bucket).key(key).build())) + + def putObject(client: S3AsyncClient, + bucket: String, + key: String, + writeStream: ReactiveWriteStream[Buffer], + acl: ObjectCannedACL, + contentLength: Option[Long] = None, + contentMD5: Option[String] = None, + storageClass: Option[String] = None, + tags: Map[String, String] = Map()) = + ZIO.fromCompletableFuture { + val builder = PutObjectRequest.builder() + .bucket(bucket) + .key(key) + .acl(acl) + .tagging(Tagging.builder().tagSet(tags.map { case (k,v) => Tag.builder().key(k).value(v).build() }.toList.asJava).build()) + + if (contentLength.nonEmpty) builder.contentLength(contentLength.get) + if (contentMD5.nonEmpty) builder.contentMD5(contentMD5.get) + if (storageClass.nonEmpty) builder.storageClass(storageClass.get) + + client.putObject(builder.build(), publisher(writeStream)) + }.map(_.eTag()) + + def copyObject(client: S3AsyncClient, + sourceBucket: String, + sourceKey: String, + destinationBucket: String, + destinationKey: String, + sourceIfMatch: Option[String] = None, + sourceIfNoneMatch: Option[String] = None, + sourceIfModifiedSince: Option[Instant] = None, + sourceIfUnmodifiedSince: Option[Instant] = None) = { + + val builder = CopyObjectRequest.builder() + .sourceBucket(sourceBucket).sourceKey(sourceKey) + .destinationBucket(destinationBucket).destinationKey(destinationKey) + + if (sourceIfMatch.nonEmpty) builder.copySourceIfMatch(sourceIfMatch.get) + if (sourceIfNoneMatch.nonEmpty) builder.copySourceIfNoneMatch(sourceIfNoneMatch.get) + if (sourceIfModifiedSince.nonEmpty) builder.copySourceIfModifiedSince(sourceIfModifiedSince.get) + if (sourceIfUnmodifiedSince.nonEmpty) builder.copySourceIfUnmodifiedSince(sourceIfUnmodifiedSince.get) + + ZIO.fromCompletableFuture(client.copyObject(builder.build())).map(_.copyObjectResult()) + } + + def getObjectAcl(client: S3AsyncClient, bucket: String, key: String) = + ZIO.fromCompletableFuture(client.getObjectAcl(GetObjectAclRequest.builder().bucket(bucket).key(key).build())) + + def putObjectAcl(client: S3AsyncClient, bucket: String, key: String, acl: ObjectCannedACL) = + ZIO.fromCompletableFuture(client.putObjectAcl(PutObjectAclRequest.builder().bucket(bucket).key(key).acl(acl).build())).unit + + def 
putObjectTagging(client: S3AsyncClient, bucket: String, key: String, tags: Map[String, String]) = + ZIO.fromCompletableFuture(client.putObjectTagging( + PutObjectTaggingRequest.builder().bucket(bucket).key(key) + .tagging(Tagging.builder().tagSet(tags.map { case (k, v) => Tag.builder().key(k).value(v).build() }.toList.asJava).build()) + .build()) + ).unit + + def uploadPartCopy(client: S3AsyncClient, + sourceBucket: String, + sourceKey: String, + destinationBucket: String, + destinationKey: String, + uploadId: String, + partNumber: Int, + copySourceIfMatch: Option[String], + copySourceIfNoneMatch: Option[String], + copySourceIfModifiedSince: Option[Instant], + copySourceIfUnmodifiedSince: Option[Instant], + copySourceRange: Option[String]) = + ZIO.fromCompletableFuture { + val builder = UploadPartCopyRequest.builder() + .sourceBucket(sourceBucket).sourceKey(sourceKey) + .destinationBucket(destinationBucket).destinationKey(destinationKey) + .partNumber(partNumber).uploadId(uploadId) + + if (copySourceRange.nonEmpty) builder.copySourceRange(copySourceRange.get) + if (copySourceIfMatch.nonEmpty) builder.copySourceIfMatch(copySourceIfMatch.get) + if (copySourceIfNoneMatch.nonEmpty) builder.copySourceIfNoneMatch(copySourceIfNoneMatch.get) + if (copySourceIfModifiedSince.nonEmpty) builder.copySourceIfModifiedSince(copySourceIfModifiedSince.get) + if (copySourceIfUnmodifiedSince.nonEmpty) builder.copySourceIfUnmodifiedSince(copySourceIfUnmodifiedSince.get) + + client.uploadPartCopy(builder.build()) + }.map(_.copyPartResult()) + + def uploadPart(client: S3AsyncClient, + bucket: String, + key: String, + uploadId: String, + partNumber: Int, + writeStream: ReactiveWriteStream[Buffer], + contentLength: Option[Long] = None) = { + val builder = UploadPartRequest.builder().bucket(bucket).key(key).partNumber(partNumber).uploadId(uploadId) + if (contentLength.nonEmpty) builder.contentLength(contentLength.get) + ZIO.fromCompletableFuture(client.uploadPart(builder.build(), AsyncRequestBody.fromPublisher(publisher(writeStream)))).map(_.eTag()) + } + + def listParts(client: S3AsyncClient, bucket: String, key: String, uploadId: String) = + ZIO.fromCompletableFuture(client.listParts(ListPartsRequest.builder().bucket(bucket).key(key).uploadId(uploadId).build())).map(_.parts().asScala.toList) + + def listMultipartUploads(client: S3AsyncClient, bucket: String, prefix: Option[String] = None) = { + val builder = ListMultipartUploadsRequest.builder().bucket(bucket) + if (prefix.nonEmpty) builder.prefix(prefix.get) + ZIO.fromCompletableFuture(client.listMultipartUploads(builder.build())).map(_.uploads().asScala.toList) + } + + def createMultipartUpload(client: S3AsyncClient, bucket: String, key: String, cannedACL: ObjectCannedACL) = + ZIO.fromCompletableFuture(client.createMultipartUpload(CreateMultipartUploadRequest.builder().bucket(bucket).key(key).acl(cannedACL).build())).map(_.uploadId()) + + def abortMultipartUpload(client: S3AsyncClient, bucket: String, key: String, uploadId: String) = + ZIO.fromCompletableFuture(client.abortMultipartUpload(AbortMultipartUploadRequest.builder().bucket(bucket).key(key).uploadId(uploadId).build())).unit + + def completeMultipartUpload(client: S3AsyncClient, bucket: String, key: String, uploadId: String) = + ZIO.fromCompletableFuture(client.completeMultipartUpload(CompleteMultipartUploadRequest.builder().bucket(bucket).key(key).uploadId(uploadId).build())).map(_.eTag()) + + def presignedUrl(bucketName: String, key: String, expirationMinutes: Int): Task[String] = { + ZIO.attempt { + 
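      // Hedged note: with the AWS SDK v2 presigner, a GetObjectPresignRequest normally wraps a
      // GetObjectRequest that carries the bucket and key, roughly as sketched below (names reuse
      // this method's parameters; the sketch is illustrative, not a drop-in replacement):
      //
      //   val getRequest = GetObjectRequest.builder().bucket(bucketName).key(key).build()
      //   val presignRequest = GetObjectPresignRequest.builder()
      //     .signatureDuration(Duration.ofMinutes(expirationMinutes))
      //     .getObjectRequest(getRequest)
      //     .build()
      //   presigner.presignGetObject(presignRequest).url().toString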
val presignRequest = GetObjectPresignRequest.builder.signatureDuration(Duration.ofMinutes(expirationMinutes)).build() + presigner.presignGetObject(presignRequest).url().toString + } + } + + private def publisher(writeStream: ReactiveWriteStream[Buffer]) = + new AsyncRequestBody() { + def contentLength: Optional[java.lang.Long] = Optional.empty + def subscribe(s: Subscriber[_ >: ByteBuffer]) = + writeStream.subscribe(new Subscriber[Buffer] { + def onSubscribe(sub: Subscription) = s.onSubscribe(sub) + def onNext(t: Buffer) = s.onNext(t.getByteBuf.nioBuffer()) + def onError(t: Throwable) = s.onError(t) + def onComplete() = s.onComplete() + }) + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/buildpack/Buildpack.scala b/jvm/src/main/scala/com/harana/modules/buildpack/Buildpack.scala new file mode 100644 index 0000000..848cb05 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/buildpack/Buildpack.scala @@ -0,0 +1,30 @@ +package com.harana.modules.buildpack + +import zio.Task +import zio.macros.accessible + +import java.io.File + +@accessible +trait Buildpack { + + def build(name: String, + path: File, + builder: Option[String] = None, + environmentVariables: Map[String, String] = Map(), + mountedVolumes: Map[File, File] = Map(), + network: Option[String] = None, + publish: Option[Boolean] = None, + runImage: Option[String] = None): Task[List[String]] + + def setDefaultBuilder(name: String): Task[List[String]] + + def rebase(name: String, + publish: Option[Boolean] = None, + runImage: Option[String] = None): Task[List[String]] + +} + +object Buildpack { + type ContainerId = String +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/buildpack/LiveBuildpack.scala b/jvm/src/main/scala/com/harana/modules/buildpack/LiveBuildpack.scala new file mode 100644 index 0000000..5c53d5a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/buildpack/LiveBuildpack.scala @@ -0,0 +1,77 @@ +package com.harana.modules.buildpack + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import zio.process.Command +import zio.{Task, ZIO, ZLayer} + +import java.io.File +import java.util.Locale +import scala.collection.mutable + +object LiveBuildpack { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveBuildpack(config, logger, micrometer) + } +} + +case class LiveBuildpack(config: Config, logger: Logger, micrometer: Micrometer) extends Buildpack { + + private val buildpackCmd = ZIO.attempt { + val os = System.getProperty("os.name").toLowerCase(Locale.ROOT) + val url = { + if (os.contains("mac")) getClass.getResource("pack/mac/pack") + if (os.contains("win")) getClass.getResource("pack/windows/pack.exe") + getClass.getResource("pack/linux/pack") + } + url.getFile + } + + + def build(name: String, + path: File, + builder: Option[String] = None, + environmentVariables: Map[String, String] = Map(), + mountedVolumes: Map[File, File] = Map(), + network: Option[String] = None, + publish: Option[Boolean] = None, + runImage: Option[String] = None): Task[List[String]] = + for { + cmd <- buildpackCmd + args <- ZIO.succeed { + val args = mutable.ListBuffer[String]("build", "name", s"--path ${path.getAbsolutePath}") + if (builder.nonEmpty) args += s"--builder ${builder.get}" + if (environmentVariables.nonEmpty) args += s"--env ${environmentVariables.map { case (k,v) => 
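+        // NB: the "build" invocation above passes the literal string "name"; presumably the
+        // `name` parameter was intended (i.e. ListBuffer("build", name, s"--path ...")).
+        // Similarly, `--run-image ${runImage.get}s` below appears to carry a stray trailing "s".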
s"$k=$v" }.mkString(",")}" + if (mountedVolumes.nonEmpty) args += s"--volume ${mountedVolumes.map { case (k,v) => s"$k:$v" }.mkString(",")}" + if (network.nonEmpty) args += s"--network ${network.get}" + if (publish.nonEmpty) args += s"--publish" + if (runImage.nonEmpty) args += s"--run-image ${runImage.get}s" + args + } + cmd <- Command(cmd, args.toSeq: _*).lines + } yield cmd.toList + + + def setDefaultBuilder(name: String): Task[List[String]] = + Command("pack", List("set-default-builder", name): _*).lines.map(_.toList) + + + def rebase(name: String, + publish: Option[Boolean] = None, + runImage: Option[String] = None): Task[List[String]] = + for { + cmd <- buildpackCmd + args <- ZIO.succeed { + val args = mutable.ListBuffer[String]() + if (publish.nonEmpty) args += s"--publish" + if (runImage.nonEmpty) args += s"--run-image ${runImage.get}s" + args + } + cmd <- Command(cmd, args.toSeq: _*).lines + } yield cmd.toList +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/calcite/Calcite.scala b/jvm/src/main/scala/com/harana/modules/calcite/Calcite.scala new file mode 100644 index 0000000..5203dbd --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/calcite/Calcite.scala @@ -0,0 +1,11 @@ +package com.harana.modules.calcite + +import zio.Task +import zio.macros.accessible + +@accessible +trait Calcite { + + def rewrite(userId: String, query: String): Task[String] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/calcite/LiveCalcite.scala b/jvm/src/main/scala/com/harana/modules/calcite/LiveCalcite.scala new file mode 100644 index 0000000..d6547a8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/calcite/LiveCalcite.scala @@ -0,0 +1,125 @@ +package com.harana.modules.calcite + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.sdk.shared.models.common.User.UserId +import org.apache.calcite.config.Lex +import org.apache.calcite.sql.SqlKind._ +import org.apache.calcite.sql.dialect.CalciteSqlDialect +import org.apache.calcite.sql.parser.SqlParser +import org.apache.calcite.sql._ +import zio.{Task, ZLayer, ZIO} + +import scala.jdk.CollectionConverters._ + +object LiveCalcite { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveCalcite(config, logger, micrometer) + } +} + +case class LiveCalcite(config: Config, logger: Logger, micrometer: Micrometer) extends Calcite { + + def rewrite(userId: UserId, query: String): Task[String] = { + ZIO.attempt(parse(userId, query)) + } + + private val CONFIG = SqlParser.configBuilder.setLex(Lex.MYSQL).build + + + private def parse(userId: String, sql: String): String = { + val sqlParser = SqlParser.create(sql, CONFIG) + val sqlNode = sqlParser.parseStmt + + sqlNode.getKind match { + case INSERT => + val sqlInsert = sqlNode.asInstanceOf[SqlInsert] + val source = sqlInsert.getSource.asInstanceOf[SqlSelect] + parseSource(source, userId) + + case SELECT => + parseSource(sqlNode.asInstanceOf[SqlSelect], userId) + + case ORDER_BY => + println("Order by not currently supported") + + case _ => + throw new IllegalArgumentException("It must be an insert SQL, sql:" + sql) + } + + sqlNode.toSqlString(CalciteSqlDialect.DEFAULT).getSql + } + + + private def parseSource(sqlSelect: SqlSelect, userId: String): Unit = { + parseSelectList(sqlSelect.getSelectList, userId) + 
parseFrom(sqlSelect.getFrom, userId) match { + case Some(newIdentifier) => sqlSelect.setFrom(newIdentifier) + case None => + } + } + + + private def parseSelectList(sqlNodeList: SqlNodeList, userId: String): Unit = + sqlNodeList.asScala.foreach(parseSelect(_, userId)) + + + private def parseFrom(from: SqlNode, userId: String): Option[SqlIdentifier] = + from.getKind match { + case IDENTIFIER => + val identifier = from.asInstanceOf[SqlIdentifier] + Some(identifier.setName(0, s"$userId.$identifier")) + + case AS => + val sqlBasicCall = from.asInstanceOf[SqlBasicCall] + + parseFrom(sqlBasicCall.getOperandList.asScala.head, userId) match { + case Some(newIdentifier) => sqlBasicCall.setOperand(0, newIdentifier) + case None => + } + + None + + case SELECT => + parseSource(from.asInstanceOf[SqlSelect], userId) + None + + case JOIN => + val sqlJoin = from.asInstanceOf[SqlJoin] + + parseFrom(sqlJoin.getLeft, userId) match { + case Some(newIdentifier) => sqlJoin.setLeft(newIdentifier) + case None => + } + + parseFrom(sqlJoin.getRight, userId) match { + case Some(newIdentifier) => sqlJoin.setRight(newIdentifier) + case None => + } + + None + + case _ => None + } + + + private def parseSelect(sqlNode: SqlNode, userId: String): Unit = + sqlNode.getKind match { + case IDENTIFIER => + + case AS => + val firstNode = sqlNode.asInstanceOf[SqlBasicCall].getOperandList.asScala.head + parseSelect(firstNode, userId) + + case SELECT => + parseSource(sqlNode.asInstanceOf[SqlSelect], userId) + + case _ => + + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/calcite/LiveCalcite2.scala b/jvm/src/main/scala/com/harana/modules/calcite/LiveCalcite2.scala new file mode 100644 index 0000000..e9d90c9 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/calcite/LiveCalcite2.scala @@ -0,0 +1,115 @@ +package com.harana.modules.calcite + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.sdk.shared.models.common.User.UserId +import org.apache.calcite.sql.SqlKind._ +import org.apache.calcite.sql.dialect.CalciteSqlDialect +import org.apache.calcite.sql.parser.SqlParser +import org.apache.calcite.sql._ +import zio.{Task, ZLayer, ZIO} + +import scala.collection.mutable.ListBuffer + +object LiveCalcite2 { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveCalcite2(config, logger, micrometer) + } +} + +case class LiveCalcite2(config: Config, logger: Logger, micrometer: Micrometer) extends Calcite { + + def rewrite(userId: UserId, query: String): Task[String] = + for { + node <- ZIO.attempt(SqlParser.create(query).parseQuery()) + sqlNode <- ZIO.attempt(tableNames(userId, node)) + } yield sqlNode.toSqlString(CalciteSqlDialect.DEFAULT).getSql + + private def tableNames(userId: UserId, sqlNode: SqlNode): SqlNode = { + val node = if (sqlNode.getKind.equals(ORDER_BY)) sqlNode.asInstanceOf[SqlOrderBy].query else sqlNode + processFrom(userId, node) + } + + private def processFrom(userId: UserId, node: SqlNode): SqlNode = { + val childNode = node.asInstanceOf[SqlSelect].getFrom + if (childNode == null) return node + + childNode.getKind match { + case IDENTIFIER => + println(childNode.toSqlString(CalciteSqlDialect.DEFAULT)) + val identifier = childNode.asInstanceOf[SqlIdentifier] + val newIdentifier = identifier.setName(0, s"$userId.${identifier.names.get(0)}") + 
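+        // SqlIdentifier.setName does not mutate in place: it returns a copy with the component at
+        // index 0 replaced, so the result has to be set back onto the enclosing SqlSelect below.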
node.asInstanceOf[SqlSelect].setFrom(newIdentifier) + node + case AS => + println(childNode.toSqlString(CalciteSqlDialect.DEFAULT)) + val call = childNode.asInstanceOf[SqlBasicCall] + if (call.operand(0).isInstanceOf[SqlIdentifier]) { + val newIdentifier = setName(userId, call.operand(0).asInstanceOf[SqlIdentifier]) + call.setOperand(0, newIdentifier) + node.asInstanceOf[SqlSelect].setFrom(call) + } + node + + case JOIN => + println(childNode.toSqlString(CalciteSqlDialect.DEFAULT)) + val fromNode = childNode.asInstanceOf[SqlJoin] + + if (fromNode.getLeft.getKind.equals(AS)) { + val newLeftIdentifier = setName(userId, leftNode(fromNode)) + val newRightIdentifier = setName(userId, rightNode(fromNode)) + fromNode.getLeft.asInstanceOf[SqlBasicCall].setOperand(0, newLeftIdentifier) + fromNode.getRight.asInstanceOf[SqlBasicCall].setOperand(0, newRightIdentifier) + node + } + else { + val tables = ListBuffer[String]() + + fromNode.getLeft.getKind match { + case IDENTIFIER => + if (fromNode.getRight.getKind.equals(IDENTIFIER)) { + val newLeftIdentifier = setName(userId, fromNode.getLeft.asInstanceOf[SqlIdentifier]) + val newRightIdentifier = setName(userId, fromNode.getRight.asInstanceOf[SqlIdentifier]) + fromNode.setLeft(newLeftIdentifier) + fromNode.setRight(newRightIdentifier) + node + } else { + println(fromNode.getLeft.toString) + node + } + + case JOIN => + var leftJoin = fromNode.getLeft.asInstanceOf[SqlJoin] + + while (!leftJoin.getLeft.getKind.equals(AS) && leftJoin.getLeft.isInstanceOf[SqlJoin]) { + // tables += rightNode(leftJoin) + leftJoin = leftJoin.getLeft.asInstanceOf[SqlJoin] + } + + if (leftJoin.getLeft.isInstanceOf[SqlBasicCall]) + println(s"Left C = ${leftNode(leftJoin)}") + + if (leftJoin.getRight.isInstanceOf[SqlBasicCall]) + println(s"Right C = ${rightNode(leftJoin)}") + + node + //tables.toList ++ List(left(leftJoin), right(leftJoin)) + } + } + } + } + + private def setName(userId: UserId, identifier: SqlIdentifier) = + identifier.setName(0, s"$userId.${identifier.names.get(0)}") + + private def leftNode(node: SqlJoin) = + node.getLeft.asInstanceOf[SqlBasicCall].operand(0).asInstanceOf[SqlIdentifier] + + private def rightNode(node: SqlJoin) = + node.getRight.asInstanceOf[SqlBasicCall].operand(0).asInstanceOf[SqlIdentifier] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/Clearbit.scala b/jvm/src/main/scala/com/harana/modules/clearbit/Clearbit.scala new file mode 100644 index 0000000..5aa33a3 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/Clearbit.scala @@ -0,0 +1,12 @@ +package com.harana.modules.clearbit + +import com.harana.modules.clearbit.models.RiskResponse +import zio.Task +import zio.macros.accessible + +@accessible +trait Clearbit { + + def calculateRisk(emailAddress: String, ipAddress: String, firstName: String, lastName: String): Task[RiskResponse] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/LiveClearbit.scala b/jvm/src/main/scala/com/harana/modules/clearbit/LiveClearbit.scala new file mode 100644 index 0000000..b066d76 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/LiveClearbit.scala @@ -0,0 +1,33 @@ +package com.harana.modules.clearbit + +import com.harana.modules.clearbit.models.RiskResponse +import com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import io.circe.parser._ +import zio.{Task, ZLayer, ZIO} 
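+
+// LiveClearbit calls Clearbit's Risk API (POST https://risk.clearbit.com/v1/calculate), passing the
+// "clearbit-api-key" secret as the credentials (empty password) and decoding the JSON response into
+// RiskResponse via circe. A minimal usage sketch, assuming the usual Config/Http/Logger/Micrometer
+// layers are provided alongside LiveClearbit.layer (values below are purely illustrative):
+//
+//   val risk: Task[RiskResponse] =
+//     Clearbit.calculateRisk("jane@example.com", "203.0.113.10", "Jane", "Doe")
+//       .provideLayer(/* LiveClearbit.layer plus its Config/Http/Logger/Micrometer dependencies */ ???)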
+ +object LiveClearbit { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + http <- ZIO.service[Http] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveClearbit(config, http, logger, micrometer) + } +} + +case class LiveClearbit(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends Clearbit { + + def calculateRisk(emailAddress: String, ipAddress: String, firstName: String, lastName: String): Task[RiskResponse] = + for { + apiKey <- config.secret("clearbit-api-key") + _ <- logger.debug(s"Calculating risk for email: $emailAddress") + params = Map("email" -> emailAddress, "given_name" -> firstName, "family_name" -> lastName, "ip" -> ipAddress) + response <- http.postForm("https://risk.clearbit.com/v1/calculate", params, credentials = Some((apiKey, ""))).mapError(e => new Exception(e.toString)).onError(e => logger.error(s"Failed to calculate risk: ${e.prettyPrint}")) + risk <- ZIO.fromEither(decode[RiskResponse](response.body().string())).onError(e => logger.error(s"Failed to decode risk to RiskResponse object: ${e.prettyPrint}")) + } yield risk + +} diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/ClearbitError.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/ClearbitError.scala new file mode 100644 index 0000000..264ff31 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/ClearbitError.scala @@ -0,0 +1,3 @@ +package com.harana.modules.clearbit.models + +case class ClearbitError() \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/Common.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/Common.scala new file mode 100644 index 0000000..85b8161 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/Common.scala @@ -0,0 +1,34 @@ +package com.harana.modules.clearbit.models + +case class Facebook(handle: String) + +case class LinkedIn(handle: String) + +case class AngelList(handle: String, + bio: String, + blog: String, + site: String, + followers: Long, + avatar: String) + +case class Crunchbase(handle: String) + + +case class Github(handle: String, + id: Long, + avatar: String, + company: String, + blog: String, + followers: Long, + following: Long) + +case class Twitter(handle: String, + id: String, + bio: String, + followers: Long, + following: Long, + statuses: Long, + favorites: Long, + location: String, + site: String, + avatar: String) diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/Company.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/Company.scala new file mode 100644 index 0000000..8c169ab --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/Company.scala @@ -0,0 +1,66 @@ +package com.harana.modules.clearbit.models + +case class Company(id: String, + name: String, + legalName: String, + domain: String, + domainAliases: List[String], + logo: String, + site: Site, + tags: List[String], + category: Category, + description: String, + foundedYear: Integer, + location: String, + timeZone: String, + utcOffset: Long, + geo: CompanyGeo, + metrics: Metrics, + facebook: Facebook, + linkedin: LinkedIn, + twitter: Twitter, + crunchbase: Crunchbase, + emailProvider: Boolean, + `type`: String, + ticker: String, + phone: String, + indexedAt: String, + tech: List[String], + parent: Parent) + +case class Site(title: String, + h1: String, + metaDescription: String, + phoneNumbers: List[String], + emailAddresses: List[String]) + +case class 
Category(sector: String, + industryGroup: String, + industry: String, + subIndustry: String, + sicCode: String, + naicsCode: String) + +case class CompanyGeo(streetNumber: String, + streetName: String, + subPremise: String, + city: String, + state: String, + stateCode: String, + postalCode: String, + country: String, + countryCode: String, + lat: Double, + lng: Double) + +case class Metrics(alexaUsRank: Long, + alexaGlobalRank: Long, + employees: Long, + employeesRange: String, + marketCap: Long, + raised: Long, + annualRevenue: Long, + fiscalYearEnd: Long, + estimatedAnnualRevenue: String) + +case class Parent(domain: String) diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/ModelType.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/ModelType.scala new file mode 100644 index 0000000..91fd613 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/ModelType.scala @@ -0,0 +1,8 @@ +package com.harana.modules.clearbit.models + +sealed trait ModelType +object ModelType { + case object PERSON extends ModelType + case object COMPANY extends ModelType + case object PERSON_COMPANY extends ModelType +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/Person.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/Person.scala new file mode 100644 index 0000000..bb14b77 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/Person.scala @@ -0,0 +1,55 @@ +package com.harana.modules.clearbit.models + +case class Person(id: String, + name: Name, + email: String, + gender: String, + location: String, + timeZone: String, + utcOffset: Long, + geo: PersonGeo, + bio: String, + site: String, + avatar: String, + employment: Employment, + facebook: Facebook, + github: Github, + twitter: Twitter, + linkedin: LinkedIn, + aboutme: AboutMe, + gravatar: Gravatar, + fuzzy: Boolean, + emailProvider: Boolean, + indexedAt: String) + +case class Name(fullName: String, + givenName: String, + familyName: String) + +case class PersonGeo(city: String, + state: String, + stateCode: String, + country: String, + countryCode: String, + lat: Double, + lng: Double) + +case class Employment(name: String, + title: String, + domain: String, + role: String, + subRole: String, + seniority: String) + +case class AboutMe(handle: String, + bio: String, + avatar: String) + +case class Gravatar(handle: String, + urls: List[Url], + avatar: String, + avatars: List[Avatar]) + +case class Url(value: String, title: String) + +case class Avatar(url: String, `type`: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/PersonCompany.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/PersonCompany.scala new file mode 100644 index 0000000..2ce81b7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/PersonCompany.scala @@ -0,0 +1,3 @@ +package com.harana.modules.clearbit.models + +case class PersonCompany(person: Person, company: Company) diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/RiskResponse.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/RiskResponse.scala new file mode 100644 index 0000000..d92b8f0 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/RiskResponse.scala @@ -0,0 +1,35 @@ +package com.harana.modules.clearbit.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class RiskResponse(id: String, + live: Boolean, + fingerprint: Boolean, + email: Email, + address: Address, 
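+                        // fields below: `ip` carries Clearbit's IP reputation checks and `risk`
+                        // the overall level/score/reasons returned by the Risk API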
+ ip: IP, + risk: Risk) + +@JsonCodec +case class Email(valid: Option[Boolean], + socialMatch: Option[Boolean], + companyMatch: Option[Boolean], + nameMatch: Option[Boolean], + disposable: Option[Boolean], + freeProvider: Option[Boolean], + blacklisted: Option[Boolean]) + +@JsonCodec +case class Address(geoMatch: Option[String]) + +@JsonCodec +case class IP(proxy: Option[Boolean], + geoMatch: Option[Boolean], + blacklisted: Boolean, + rateLimited: Option[Boolean]) + +@JsonCodec +case class Risk(level: String, + score: Int, + reasons: List[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/WebhookResponse.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/WebhookResponse.scala new file mode 100644 index 0000000..c0d3ecd --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/WebhookResponse.scala @@ -0,0 +1,6 @@ +package com.harana.modules.clearbit.models + +case class WebhookResponse(`type`: ModelType, + body: Object, + status: Int, + id: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/docker/Docker.scala b/jvm/src/main/scala/com/harana/modules/docker/Docker.scala new file mode 100644 index 0000000..b2c67bf --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/docker/Docker.scala @@ -0,0 +1,297 @@ +package com.harana.modules.docker + +import com.github.dockerjava.api.command._ +import com.github.dockerjava.api.exception.{DockerException, NotFoundException, UnauthorizedException} +import com.github.dockerjava.api.model.Network.Ipam +import com.github.dockerjava.api.model.{Service => DockerService, _} +import com.harana.modules.core.http.models.OkHttpError +import zio.macros.accessible +import zio.{IO, Queue, UIO} + +import java.io.{File, InputStream} + +@accessible +trait Docker { + + def attachContainer(id: ContainerId): IO[DockerException, InputStream] + + def auth(username: Option[String], password: Option[String], identityToken: Option[String], registryToken: Option[String]): IO[UnauthorizedException, String] + + def auth(config: Option[AuthConfig]): IO[UnauthorizedException, String] + + def authConfig(username: Option[String], password: Option[String], identityToken: Option[String], registryToken: Option[String]): UIO[AuthConfig] + + def buildImage(dockerFileOrFolder: File, tags: Set[String]): IO[DockerException, ImageId] + + def buildImage(inputStream: InputStream, tags: Set[String]): IO[DockerException, ImageId] + + def commit(id: ContainerId): IO[NotFoundException, String] + + def connectToNetwork: UIO[Unit] + + def containerDiff(id: ContainerId): IO[NotFoundException, List[ChangeLog]] + + def containerExists(containerName: String): UIO[Boolean] + + def containerNotExists(containerName: String): UIO[Boolean] + + def containerRunning(containerName: String): UIO[Boolean] + + def containerNotRunning(containerName: String): UIO[Boolean] + + def copyResourceFromContainer(id: ContainerId, resource: String, hostPath: Option[String] = None): IO[NotFoundException, InputStream] + + def copyArchiveToContainer(id: ContainerId, tarInputStream: InputStream, remotePath: Option[String] = None): IO[NotFoundException, Unit] + + def copyResourceToContainer(id: ContainerId, resource: String, remotePath: Option[String] = None): IO[NotFoundException, Unit] + + def createContainer(name: String, + imageName: String, + command: Option[String] = None, + exposedPorts: Map[Int, Int] = Map()): IO[DockerException, ContainerId] + + def createImage(repository: String, imageStream: 
InputStream): IO[NotFoundException, ImageId] + + def createNetwork(name: Option[String] = None, + attachable: Boolean = false, + checkDuplicate: Boolean = false, + driver: Option[String] = None, + enableIpv6: Boolean = false, + internal: Boolean = false, + ipam: Option[Ipam] = None, + labels: Map[String, String] = Map(), + options: Map[String, String] = Map()): IO[DockerException, NetworkId] + + def createService(spec: ServiceSpec): IO[NotFoundException, ServiceId] + + def createVolume(name: String, driver: Option[String] = None, driverOpts: Map[String, String] = Map()): IO[NotFoundException, String] + + def disconnectFromNetwork(networkId: Option[NetworkId] = None, containerId: Option[ContainerId] = None, force: Boolean = false): UIO[Unit] + + def ensureContainerIsRunning(name: String, imageName: String, command: Option[String] = None, exposedPorts: Map[Int, Int] = Map()): IO[DockerException, Unit] + + def ensureLocalRegistryIsRunning: IO[DockerException, Unit] + + def events(containerFilter: List[ContainerId] = List(), + eventFilter: List[EventId] = List(), + imageFilter: List[ImageId] = List(), + labelFilter: Map[String, String] = Map(), + withSince: Option[String] = None, + withUntil: Option[String] = None): UIO[Queue[Event]] + + def execCreate(id: ExecId, + attachStderr: Boolean = false, + attachStdin: Boolean = false, + attachStdout: Boolean = false, + cmd: List[String] = List(), + containerId: Option[ContainerId] = None, + env: List[String] = List(), + privileged: Boolean = false, + tty: Boolean = false, + user: Option[String] = None, + workingDir: Option[String] = None): IO[NotFoundException, String] + + def execStart(id: ExecId, + detach: Boolean = false, + stdIn: Option[InputStream] = None, + tty: Boolean = false): UIO[Queue[Frame]] + + def hubTags(namespace: String, + repository: String, + page: Option[Int] = None, + pageSize: Option[Int] = None): zio.Task[List[HubTag]] + + def info: UIO[Info] + + def initializeSwarm(spec: SwarmSpec): UIO[Unit] + + def inspectContainer(id: ContainerId): IO[NotFoundException, InspectContainerResponse] + + def inspectExec(id: ExecId): IO[NotFoundException, InspectExecResponse] + + def inspectImage(id: ImageId): IO[NotFoundException, InspectImageResponse] + + def inspectNetwork(id: Option[NetworkId] = None): IO[NotFoundException, Network] + + def inspectService(id: ServiceId): IO[NotFoundException, DockerService] + + def inspectSwarm: IO[NotFoundException, Swarm] + + def inspectVolume(name: String): IO[NotFoundException, InspectVolumeResponse] + + def joinSwarm(advertiseAddr: Option[String] = None, + joinToken: Option[String] = None, + listenAddr: Option[String] = None, + remoteAddrs: List[String] = List()): UIO[Unit] + + def killContainer(id: ContainerId, signal: Option[String] = None): IO[NotFoundException, Unit] + + // def launchListenContainer: IO[DockerException, List[String]] + + def leaveSwarm(force: Boolean = false): UIO[Unit] + + def listArtifactoryRepositories(registryUrl: String, + repository: String, + authConfig: AuthConfig): IO[OkHttpError, List[String]] + + def listArtifactoryTags(registryUrl: String, + repository: String, + authConfig: AuthConfig, + image: String, + maximum: Option[Int] = None): IO[OkHttpError, List[String]] + + def listContainers(ancestorFilter: List[String] = List(), + before: Option[String] = None, + exitedFilter: Option[Int] = None, + filters: Map[String, List[String]] = Map(), + idFilter: List[String] = List(), + labelFilter: Map[String, String] = Map(), + limit: Option[Int] = None, + nameFilter: List[String] = 
List(), + networkFilter: List[String] = List(), + showAll: Option[Boolean] = None, + showSize: Option[Boolean] = None, + since: Option[String] = None, + statusFilter: List[String] = List(), + volumeFilter: List[String] = List()): UIO[List[Container]] + + def listDockerRepositories(registryUrl: String): IO[OkHttpError, List[String]] + + def listDockerTags(registryUrl: String, + image: String, + maximum: Option[Int] = None): IO[OkHttpError, List[String]] + + def listImages(danglingFilter: Option[Boolean] = None, + imageNameFilter: Option[String] = None, + labelFilter: Map[String, String] = Map(), + showAll: Option[Boolean] = None): UIO[List[Image]] + + def listNetworks(filter: Option[(String, List[String])] = None, + idFilter: List[String] = List(), + nameFilter: List[String] = List()): IO[NotFoundException, List[Network]] + + def listServices(idFilter: List[String] = List(), + labelFilter: Map[String, String] = Map(), + nameFilter: List[String] = List()): IO[NotFoundException, List[DockerService]] + + def listSwarmNodes(idFilter: List[String] = List(), + membershipFilter: List[String] = List(), + nameFilter: List[String] = List(), + roleFilter: List[String] = List()): IO[NotFoundException, List[SwarmNode]] + + def listTasks(idFilter: List[String] = List(), + labelFilter: Map[String, String] = Map(), + nameFilter: List[String] = List(), + nodeFilter: List[String] = List(), + serviceFilter: List[String] = List(), + stateFilter: List[TaskState] = List()): IO[NotFoundException, List[Task]] + + def listVolumes(includeDangling: Boolean = true, filter: Option[(String, List[String])] = None) : IO[NotFoundException, List[InspectVolumeResponse]] + + def loadImage(stream: InputStream): UIO[Unit] + + def logContainer(id: ContainerId, + followStream: Option[Boolean] = None, + since: Option[Int] = None, + stdErr: Option[Boolean] = None, + stdOut: Option[Boolean] = None, + tail: Option[Int] = None, + timestamps: Option[Boolean] = None): UIO[Queue[Frame]] + + def logService(id: ServiceId, + details: Option[Boolean] = None, + follow: Option[Boolean] = None, + since: Option[Int] = None, + stdout: Option[Boolean] = None, + stderr: Option[Boolean] = None, + tail: Option[Int] = None, + timestamps: Option[Boolean] = None): UIO[Queue[Frame]] + + def logTask(id: ServiceId, + details: Option[Boolean] = None, + follow: Option[Boolean] = None, + since: Option[Int] = None, + stdout: Option[Boolean] = None, + stderr: Option[Boolean] = None, + tail: Option[Int] = None, + timestamps: Option[Boolean] = None): UIO[Queue[Frame]] + + def pauseContainer(id: ContainerId): IO[NotFoundException, Unit] + + def ping: UIO[Unit] + + def prune(pruneType: PruneType, + dangling: Option[Boolean] = None, + labelFilter: List[String] = List(), + untilFilter: Option[String] = None): IO[NotFoundException, Long] + + def pullImage(repository: String, + authConfig: Option[AuthConfig] = None, + platform: Option[String] = None, + registry: Option[String] = None, + tag: Option[String] = None): IO[DockerException, Unit] + + def pushImage(name: String, + authConfig: Option[AuthConfig] = None, + tag: Option[String] = None): IO[DockerException, Unit] + + def removeContainer(id: ContainerId, force: Boolean = false, removeVolumes: Boolean = false): IO[NotFoundException, Unit] + + def removeContainers(name: String, force: Boolean = false, removeVolumes: Boolean = false): IO[NotFoundException, Unit] + + def removeImage(id: ImageId, force: Boolean = false, prune: Boolean = true): IO[NotFoundException, Unit] + + def removeNetwork(id: NetworkId): 
IO[NotFoundException, Unit] + + def removeService(id: ServiceId): IO[NotFoundException, Unit] + + def removeVolume(name: String): IO[NotFoundException, Unit] + + def renameContainer(id: ContainerId, name: String): UIO[Unit] + + def restartContainer(id: ContainerId, timeout: Option[Int] = None): IO[DockerException, Unit] + + def saveImage(name: String, tag: Option[String] = None): IO[NotFoundException, InputStream] + + def searchImages(term: String): UIO[List[SearchItem]] + + def startContainer(id: ContainerId): IO[DockerException, Unit] + + + def startLocalRegistry: IO[DockerException, Unit] + + def stats(id: ContainerId): IO[DockerException, Statistics] + + def stopContainer(id: ContainerId, timeout: Option[Int] = None): IO[DockerException, Unit] + + def stopLocalRegistry: IO[DockerException, Unit] + + def tagImage(id: ImageId, imageNameWithRepository: String, tag: String, force: Boolean = false): UIO[Unit] + + def topContainer(id: ContainerId, psArgs: Option[String] = None): IO[NotFoundException, TopContainerResponse] + + def unpauseContainer(id: ContainerId): IO[NotFoundException, Unit] + + def updateContainer(id: ContainerId, + blkioWeight: Option[Int] = None, + cpuPeriod: Option[Int] = None, + cpuQuota: Option[Int] = None, + cpusetCpus: Option[String] = None, + cpusetMems: Option[String] = None, + cpuShares: Option[Int] = None, + kernelMemory: Option[Long] = None, + memory: Option[Long] = None, + memoryReservation: Option[Long] = None, + memorySwap: Option[Long] = None): IO[NotFoundException, UpdateContainerResponse] + + def updateService(id: ServiceId, spec: ServiceSpec): UIO[Unit] + + def updateSwarm(spec: SwarmSpec): UIO[Unit] + + def updateSwarmNode(id: SwarmId, spec: SwarmNodeSpec, version: Option[Long] = None): IO[NotFoundException, Unit] + + def version: UIO[Unit] + + def waitForContainer(id: ContainerId): IO[DockerException, Int] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/docker/LiveDocker.scala b/jvm/src/main/scala/com/harana/modules/docker/LiveDocker.scala new file mode 100644 index 0000000..39c0454 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/docker/LiveDocker.scala @@ -0,0 +1,913 @@ +package com.harana.modules.docker + +import com.github.dockerjava.api.async.ResultCallback +import com.github.dockerjava.api.command._ +import com.github.dockerjava.api.exception.{DockerException, NotFoundException, UnauthorizedException} +import com.github.dockerjava.api.model.HostConfig.newHostConfig +import com.github.dockerjava.api.model.Network.Ipam +import com.github.dockerjava.api.model.Ports.Binding +import com.github.dockerjava.api.model.{Service => DockerService, _} +import com.github.dockerjava.core.{DefaultDockerClientConfig, DockerClientBuilder} +import com.github.dockerjava.zerodep.ZerodepDockerHttpClient +import com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.http.models.OkHttpError +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.docker.LiveDocker.registryImage +import io.circe.parser._ +import org.json4s.DefaultFormats +import zio.{IO, Queue, UIO, ZIO, ZLayer} + +import java.io.{Closeable, File, InputStream} +import scala.jdk.CollectionConverters._ + +object LiveDocker { + implicit val formats: DefaultFormats = DefaultFormats + val registryImage = "registry:latest" + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + http <- ZIO.service[Http] + logger <- 
ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveDocker(config, http, logger, micrometer) + } +} + +case class LiveDocker(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends Docker { + + private val client = for { + dockerHost <- config.string("docker.host", "127.0.0.1") + dockerPort <- config.int("docker.port", 1234) + tlsVerify <- config.boolean("docker.tlsVerify", default = false) + certPath <- config.optString("docker.certPath") + registryUsername <- config.optSecret("docker-registry-username") + registryPassword <- config.optSecret("docker-registry-password") + registryEmail <- config.optString("docker.registryEmail") + registryUrl <- config.optString("docker.registryUrl") + } yield { + val config = DefaultDockerClientConfig.createDefaultConfigBuilder() + .withDockerHost("unix:///var/run/docker.sock") + //.withDockerHost(s"tcp://$dockerHost:$dockerPort") + .withDockerTlsVerify(tlsVerify) + + if (certPath.nonEmpty) config.withDockerCertPath(certPath.get) + if (registryUsername.nonEmpty) config.withRegistryUsername(registryUsername.get) + if (registryPassword.nonEmpty) config.withRegistryPassword(registryPassword.get) + if (registryEmail.nonEmpty) config.withRegistryEmail(registryEmail.get) + if (registryUrl.nonEmpty) config.withRegistryUrl(registryUrl.get) + + DockerClientBuilder.getInstance(config.build()).withDockerHttpClient(new ZerodepDockerHttpClient.Builder().build()).build() + } + + + def attachContainer(id: ContainerId): IO[DockerException, InputStream] = + client.map(_.attachContainerCmd(id).getStdin) + + + def auth(username: Option[String], password: Option[String], identityToken: Option[String], registryToken: Option[String]): IO[UnauthorizedException, String] = + for { + authConfig <- authConfig(username, password, identityToken, registryToken) + auth <- auth(Some(authConfig)) + } yield auth + + + def auth(config: Option[AuthConfig]): IO[UnauthorizedException, String] = + client.map { c => + val cmd = c.authCmd + if (config.nonEmpty) cmd.withAuthConfig(config.get) + cmd.exec().getIdentityToken + } + + + def authConfig(username: Option[String], password: Option[String], identityToken: Option[String], registryToken: Option[String]): UIO[AuthConfig] = + (username, password, identityToken, registryToken) match { + case (Some(u), Some(p), _, _) => ZIO.succeed(new AuthConfig().withUsername(u).withPassword(p)) + case (_, _, Some(it), _) => ZIO.succeed(new AuthConfig().withIdentityToken(it)) + case (_, _, _, Some(rt)) => ZIO.succeed(new AuthConfig().withRegistrytoken(rt)) + case (_, _, _, _) => ZIO.succeed(new AuthConfig()) + } + + + def buildImage(dockerFileOrFolder: File, tags: Set[String]): IO[DockerException, ImageId] = + client.flatMap { c => + ZIO.async { (cb: IO[DockerException, ImageId] => Unit) => + c.buildImageCmd(dockerFileOrFolder).withTags(tags.asJava).exec( + new BuildImageResultCallback() { + override def onNext(item: BuildResponseItem): Unit = cb(ZIO.succeed(item.getImageId)) + override def onError(throwable: Throwable): Unit = cb(ZIO.fail(throwable.asInstanceOf[DockerException])) + } + ) + } + } + + + def buildImage(inputStream: InputStream, tags: Set[String]): IO[DockerException, ImageId] = + client.flatMap { c => + ZIO.async { (cb: IO[DockerException, ImageId] => Unit) => + c.buildImageCmd(inputStream).withTags(tags.asJava).exec( + new BuildImageResultCallback() { + override def onNext(item: BuildResponseItem): Unit = cb(ZIO.succeed(item.getImageId)) + override def onError(throwable: Throwable): Unit = 
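+            // docker-java reports build failures through this callback; they are surfaced on the
+            // ZIO error channel. The asInstanceOf cast assumes the client only ever hands us
+            // DockerExceptions here.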
cb(ZIO.fail(throwable.asInstanceOf[DockerException])) + } + ) + } + } + + def commit(id: ContainerId): IO[NotFoundException, String] = + client.map(_.commitCmd(id).exec()) + + + def connectToNetwork: UIO[Unit] = + client.map(_.connectToNetworkCmd().exec()) + + + def containerDiff(id: ContainerId): IO[NotFoundException, List[ChangeLog]] = + client.map(_.containerDiffCmd(id).exec().asScala.toList) + + + def containerExists(containerName: String): UIO[Boolean] = + listContainers(filters = Map("name" -> List(containerName))).map(_.nonEmpty) + + + def containerNotExists(containerName: String): UIO[Boolean] = + containerExists(containerName).map(result => !result) + + + def containerRunning(containerName: String): UIO[Boolean] = + listContainers(filters = Map("name" -> List(containerName), "status" -> List("running"))).map(_.nonEmpty) + + + def containerNotRunning(containerName: String): UIO[Boolean] = + containerRunning(containerName).map(result => !result) + + + def copyResourceFromContainer(id: ContainerId, resource: String, hostPath: Option[String] = None): IO[NotFoundException, InputStream] = + client.map { c => + val cmd = c.copyArchiveFromContainerCmd(id, resource) + if (hostPath.nonEmpty) cmd.withHostPath(hostPath.get) + cmd.exec() + } + + + def copyArchiveToContainer(id: ContainerId, tarInputStream: InputStream, remotePath: Option[String] = None): IO[NotFoundException, Unit] = + client.map { c => + val cmd = c.copyArchiveToContainerCmd(id).withTarInputStream(tarInputStream) + if (remotePath.nonEmpty) cmd.withRemotePath(remotePath.get) + cmd.exec() + } + + + def copyResourceToContainer(id: ContainerId, resource: String, remotePath: Option[String] = None): IO[NotFoundException, Unit] = + client.map { c => + val cmd = c.copyArchiveToContainerCmd(id).withHostResource(resource) + if (remotePath.nonEmpty) cmd.withRemotePath(remotePath.get) + cmd.exec() + } + + def createContainer(name: String, + imageName: String, + command: Option[String] = None, + exposedPorts: Map[Int, Int] = Map()): IO[DockerException, ContainerId] = { + val bindings = new Ports() + exposedPorts.foreach { case (k, v) => bindings.bind(new ExposedPort(k), Binding.bindPort(v)) } + + client.map(_.createContainerCmd(imageName).withName(name) + .withHostConfig(newHostConfig().withPortBindings(bindings)) + .withExposedPorts(exposedPorts.keys.map(new ExposedPort(_)).toList.asJava) + .exec().getId + ) + } + + def createImage(repository: String, imageStream: InputStream): IO[NotFoundException, ImageId] = + client.map(_.createImageCmd(repository, imageStream).exec().getId) + + + def createNetwork(name: Option[String] = None, + attachable: Boolean = false, + checkDuplicate: Boolean = false, + driver: Option[String] = None, + enableIpv6: Boolean = false, + internal: Boolean = false, + ipam: Option[Ipam] = None, + labels: Map[String, String] = Map(), + options: Map[String, String] = Map()): IO[DockerException, NetworkId] = + client.map { c => + val cmd = c.createNetworkCmd + .withAttachable(attachable) + .withCheckDuplicate(checkDuplicate) + .withEnableIpv6(enableIpv6) + .withInternal(internal) + .withLabels(labels.asJava) + .withOptions(options.asJava) + if (ipam.nonEmpty) cmd.withIpam(ipam.get) + if (name.nonEmpty) cmd.withName(name.get) + cmd.exec().getId + } + + + def createService(spec: ServiceSpec): IO[NotFoundException, ServiceId] = + client.map(_.createServiceCmd(spec).exec().getId) + + + def createVolume(name: String, driver: Option[String] = None, driverOpts: Map[String, String] = Map()): IO[NotFoundException, String] = + 
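+    // Pattern used throughout LiveDocker: obtain the docker-java command from the `client` effect,
+    // apply whichever optional parameters were supplied, then exec(). Here the mount point of the
+    // newly created volume is returned.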
client.map { c => + val cmd = c.createVolumeCmd.withName(name).withDriverOpts(driverOpts.asJava) + if (driver.nonEmpty) cmd.withDriver(driver.get) + cmd.exec().getMountpoint + } + + + def disconnectFromNetwork(networkId: Option[NetworkId] = None, containerId: Option[ContainerId] = None, force: Boolean = false): UIO[Unit] = + client.map { c => + val cmd = c.disconnectFromNetworkCmd().withForce(force) + if (containerId.nonEmpty) cmd.withContainerId(containerId.get) + if (networkId.nonEmpty) cmd.withNetworkId(networkId.get) + cmd.exec() + } + + + def ensureContainerIsRunning(name: String, imageName: String, command: Option[String] = None, exposedPorts: Map[Int, Int] = Map()): IO[DockerException, Unit] = + for { + running <- containerRunning(name) + existing <- listContainers(nameFilter = List(name), showAll = Some(true )) + _ <- ZIO.when(!running)(logger.debug(s"$name not running, starting ${if (existing.isEmpty) "new" else "existing"} container.")) + _ <- ZIO.when(!running && existing.isEmpty)( + for { + _ <- pullImage(imageName) + id <- createContainer(name, imageName, command, exposedPorts) + _ <- startContainer(id) + } yield () + ) + _ <- ZIO.when(!running && existing.nonEmpty)(startContainer(existing.head.getId)) + } yield () + + + def ensureLocalRegistryIsRunning: IO[DockerException, Unit] = + ensureContainerIsRunning("registry", registryImage, exposedPorts = Map(5000 -> 5000)) + + + def events(containerFilter: List[ContainerId] = List(), + eventFilter: List[EventId] = List(), + imageFilter: List[ImageId] = List(), + labelFilter: Map[String, String] = Map(), + withSince: Option[String] = None, + withUntil: Option[String] = None): UIO[Queue[Event]] = + for { + c <- client + q <- Queue.unbounded[Event] + } yield { + val cmd = c.eventsCmd() + .withContainerFilter(containerFilter: _*) + .withEventFilter(eventFilter: _*) + .withImageFilter(imageFilter: _*) + .withLabelFilter(labelFilter.asJava) + if (withSince.nonEmpty) cmd.withSince(withSince.get) + if (withUntil.nonEmpty) cmd.withUntil(withUntil.get) + + cmd.exec( + new ResultCallback[Event]() { + override def onNext(item: Event): Unit = q.offer(item) + override def onError(throwable: Throwable): Unit = {} + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + q + } + + + def execCreate(id: ExecId, + attachStderr: Boolean = false, + attachStdin: Boolean = false, + attachStdout: Boolean = false, + cmd: List[String] = List(), + containerId: Option[ContainerId] = None, + env: List[String] = List(), + privileged: Boolean = false, + tty: Boolean = false, + user: Option[String] = None, + workingDir: Option[String] = None): IO[NotFoundException, String] = + client.map { c => + var execCreateCmd = c.execCreateCmd(id) + .withAttachStderr(attachStderr) + .withAttachStdin(attachStdin) + .withAttachStdout(attachStdout) + .withCmd(cmd: _*) + .withEnv(env.asJava) + .withPrivileged(privileged) + .withTty(tty) + if (containerId.nonEmpty) execCreateCmd = execCreateCmd.withContainerId(containerId.get) + if (user.nonEmpty) execCreateCmd = execCreateCmd.withUser(user.get) + execCreateCmd.exec().getId + } + + + def execStart(id: ExecId, + detach: Boolean = false, + stdIn: Option[InputStream] = None, + tty: Boolean = false): UIO[Queue[Frame]] = + for { + c <- client + q <- Queue.unbounded[Frame] + } yield { + val cmd = c.execStartCmd(id).withDetach(detach).withTty(tty) + if (stdIn.nonEmpty) cmd.withStdIn(stdIn.get) + cmd.exec( + new ResultCallback[Frame]() { + override def 
onNext(item: Frame): Unit = q.offer(item) + override def onError(throwable: Throwable): Unit = {} + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + q + } + + def hubTags(namespace: String, + repository: String, + page: Option[Int] = None, + pageSize: Option[Int] = None): zio.Task[List[HubTag]] = + for { + page <- ZIO.succeed(page.getOrElse(1)) + pageSize <- ZIO.succeed(pageSize.getOrElse(10)) + query = s"https://hub.docker.com/v2/namespaces/$namespace/repositories/$repository/tags?page=$page&page_size=$pageSize" + response <- http.get(query).mapError(ex => new Exception(ex.toString)) + hubTags <- ZIO.from(decode[HubPage](response.body().string())).map(_.results) + } yield hubTags + + + def info: UIO[Info] = + client.map(_.infoCmd().exec()) + + + def initializeSwarm(spec: SwarmSpec): UIO[Unit] = + client.map(_.initializeSwarmCmd(spec).exec()) + + + def inspectContainer(id: ContainerId): IO[NotFoundException, InspectContainerResponse] = + client.map(_.inspectContainerCmd(id).exec()) + + + def inspectExec(id: ExecId): IO[NotFoundException, InspectExecResponse] = + client.map(_.inspectExecCmd(id).exec()) + + + def inspectImage(id: ImageId): IO[NotFoundException, InspectImageResponse] = + client.map(_.inspectImageCmd(id).exec()) + + + def inspectNetwork(id: Option[NetworkId] = None): IO[NotFoundException, Network] = + client.map(_.inspectNetworkCmd().exec()) + + + def inspectService(id: ServiceId): IO[NotFoundException, DockerService] = + client.map(_.inspectServiceCmd(id).exec()) + + + def inspectSwarm: IO[NotFoundException, Swarm] = + client.map(_.inspectSwarmCmd().exec()) + + + def inspectVolume(name: String): IO[NotFoundException, InspectVolumeResponse] = + client.map(_.inspectVolumeCmd(name).exec()) + + + def joinSwarm(advertiseAddr: Option[String] = None, + joinToken: Option[String] = None, + listenAddr: Option[String] = None, + remoteAddrs: List[String] = List()): UIO[Unit] = + client.map { c => + val cmd = c.joinSwarmCmd().withRemoteAddrs(remoteAddrs.asJava) + if (advertiseAddr.nonEmpty) cmd.withAdvertiseAddr(advertiseAddr.get) + if (joinToken.nonEmpty) cmd.withJoinToken(joinToken.get) + if (listenAddr.nonEmpty) cmd.withListenAddr(listenAddr.get) + cmd.exec() + } + + + def killContainer(id: ContainerId, signal: Option[String] = None): IO[NotFoundException, Unit] = + client.map { c => + val cmd = c.killContainerCmd(id) + if (signal.nonEmpty) cmd.withSignal(signal.get) + cmd.exec() + } + + + // def launchListenContainer: IO[DockerException, List[String]] = + // for { + // cmd <- Command( + // "docker", "run", "-d", "-v", "/var/run/docker.sock:/var/run/docker.sock", "-p", "127.0.0.1:1234:1234", + // "bobrik/socat", "TCP-LISTEN:1234,fork", "UNIX-CONNECT:/var/run/docker.sock").lines.provide(Has(blocking)) + // } yield cmd + + + def leaveSwarm(force: Boolean = false): UIO[Unit] = + client.map(_.leaveSwarmCmd().withForceEnabled(force).exec()) + + + def listArtifactoryRepositories(registryUrl: String, + repository: String, + authConfig: AuthConfig): IO[OkHttpError, List[String]] = +null +// for { +// token <- http.getAsJson(s"$registryUrl/v2/token").map(_.hcursor.downField("token").as[String]) +// headers = Map("Authorization" -> s"Bearer $token") +// url = s"$registryUrl/api/docker/$repository/v2/_catalog" +// repositoriesJson <- http.getAsJson(url, Map(), headers).map(_.hcursor.downField("repositories").as[String]) +// repositories = repositoriesJson.children.map(_.extract[String]) +// } yield 
repositories + + + def listArtifactoryTags(registryUrl: String, + repository: String, + authConfig: AuthConfig, + image: String, + maximum: Option[Int] = None): IO[OkHttpError, List[String]] = +null +// for { +// token <- http.getAsJson(s"$registryUrl/v2/token").map(_.hcursor.downField("token").as[String]) +// headers = Map("Authorization" -> s"Bearer $token") +// url = s"$registryUrl/api/docker/$repository/v2/${image}/tags/list?n=${maximum.getOrElse(5)}" +// repositories <- http.getAsJson(url, Map(), headers).map(_.hcursor.downField("tags").as[List[String]]) +// } yield repositories + + + def listContainers(ancestorFilter: List[String] = List(), + before: Option[String] = None, + exitedFilter: Option[Int] = None, + filters: Map[String, List[String]] = Map(), + idFilter: List[String] = List(), + labelFilter: Map[String, String] = Map(), + limit: Option[Int] = None, + nameFilter: List[String] = List(), + networkFilter: List[String] = List(), + showAll: Option[Boolean] = None, + showSize: Option[Boolean] = None, + since: Option[String] = None, + statusFilter: List[String] = List(), + volumeFilter: List[String] = List()): UIO[List[Container]] = + client.map { c => + val cmd = c.listContainersCmd + if (ancestorFilter.nonEmpty) cmd.withAncestorFilter(ancestorFilter.asJava) + if (filters.nonEmpty) filters.foreach { case (k, v) => cmd.getFilters.put(k, v.asJava) } + if (idFilter.nonEmpty) cmd.withIdFilter(idFilter.asJava) + if (labelFilter.nonEmpty) cmd.withLabelFilter(labelFilter.asJava) + if (nameFilter.nonEmpty) cmd.withNameFilter(nameFilter.asJava) + if (networkFilter.nonEmpty) cmd.withNetworkFilter(networkFilter.asJava) + if (statusFilter.nonEmpty) cmd.withStatusFilter(statusFilter.asJava) + if (volumeFilter.nonEmpty) cmd.withVolumeFilter(volumeFilter.asJava) + if (before.nonEmpty) cmd.withBefore(before.get) + if (exitedFilter.nonEmpty) cmd.withExitedFilter(exitedFilter.get) + if (limit.nonEmpty) cmd.withLimit(limit.get) + if (showAll.nonEmpty) cmd.withShowAll(showAll.get) + if (showSize.nonEmpty) cmd.withShowSize(showSize.get) + cmd.exec().asScala.toList + } + + + def listDockerRepositories(registryUrl: String): IO[OkHttpError, List[String]] = + null +// for { +// url <- ZIO.from(s"$registryUrl/v2/_catalog") +// repositoriesJson <- http.getAsJson(url, Map()).map(_ \ "repositories") +// repositories = repositoriesJson.children.map(_.extract[String]) +// } yield repositories + + + def listDockerTags(registryUrl: String, + image: String, + maximum: Option[Int] = None): IO[OkHttpError, List[String]] = + null +// for { +// url <- ZIO.from(s"$registryUrl/v2/${image}/tags/list?n=${maximum.getOrElse(5)}") +// repositories <- http.getAsJson(url, Map()).map(json => (json \ "tags").extract[List[String]]) +// } yield repositories + + + def listImages(danglingFilter: Option[Boolean] = None, + imageNameFilter: Option[String] = None, + labelFilter: Map[String, String] = Map(), + showAll: Option[Boolean] = None): UIO[List[Image]] = + client.map { c => + val cmd = c.listImagesCmd + .withLabelFilter(labelFilter.asJava) + if (danglingFilter.nonEmpty) cmd.withDanglingFilter(danglingFilter.get) + if (imageNameFilter.nonEmpty) cmd.withImageNameFilter(imageNameFilter.get) + if (showAll.nonEmpty) cmd.withShowAll(showAll.get) + cmd.exec().asScala.toList + } + + + def listNetworks(filter: Option[(String, List[String])] = None, + idFilter: List[String] = List(), + nameFilter: List[String] = List()): IO[NotFoundException, List[Network]] = + client.map { c => + val cmd = c.listNetworksCmd() + .withIdFilter(idFilter: 
_*) + .withNameFilter(nameFilter: _*) + if (filter.nonEmpty) cmd.withFilter(filter.get._1, filter.get._2.asJava) + cmd.exec().asScala.toList + } + + + def listServices(idFilter: List[String] = List(), + labelFilter: Map[String, String] = Map(), + nameFilter: List[String] = List()): IO[NotFoundException, List[DockerService]] = + client.map(_.listServicesCmd() + .withIdFilter(idFilter.asJava) + .withLabelFilter(labelFilter.asJava) + .withNameFilter(nameFilter.asJava) + .exec().asScala.toList) + + + def listSwarmNodes(idFilter: List[String] = List(), + membershipFilter: List[String] = List(), + nameFilter: List[String] = List(), + roleFilter: List[String] = List()): IO[NotFoundException, List[SwarmNode]] = + client.map(_.listSwarmNodesCmd() + .withIdFilter(idFilter.asJava) + .withMembershipFilter(membershipFilter.asJava) + .withNameFilter(nameFilter.asJava) + .withRoleFilter(roleFilter.asJava) + .exec().asScala.toList) + + + def listTasks(idFilter: List[String] = List(), + labelFilter: Map[String, String] = Map(), + nameFilter: List[String] = List(), + nodeFilter: List[String] = List(), + serviceFilter: List[String] = List(), + stateFilter: List[TaskState] = List()): IO[NotFoundException, List[Task]] = + client.map(_.listTasksCmd() + .withIdFilter(idFilter: _*) + .withLabelFilter(labelFilter.asJava) + .withNameFilter(nameFilter: _*) + .withNodeFilter(nodeFilter: _*) + .withServiceFilter(serviceFilter: _*) + .withStateFilter(stateFilter: _*) + .exec().asScala.toList) + + + def listVolumes(includeDangling: Boolean = true, filter: Option[(String, List[String])] = None): IO[NotFoundException, List[InspectVolumeResponse]] = + client.map { c => + val cmd = c.listVolumesCmd.withDanglingFilter(includeDangling) + if (filter.nonEmpty) cmd.withFilter(filter.get._1, filter.get._2.asJava) + cmd.exec().getVolumes.asScala.toList + } + + + def loadImage(stream: InputStream): UIO[Unit] = + client.map(_.loadImageCmd(stream).exec()) + + + def logContainer(id: ContainerId, + followStream: Option[Boolean] = None, + since: Option[Int] = None, + stderr: Option[Boolean] = None, + stdout: Option[Boolean] = None, + tail: Option[Int] = None, + timestamps: Option[Boolean] = None): UIO[Queue[Frame]] = + for { + c <- client + q <- Queue.unbounded[Frame] + } yield { + val cmd = c.logContainerCmd(id) + if (followStream.nonEmpty) cmd.withFollowStream(followStream.get) + if (since.nonEmpty) cmd.withSince(since.get) + if (stderr.nonEmpty) cmd.withStdErr(stderr.get) + if (stdout.nonEmpty) cmd.withStdOut(stdout.get) + if (tail.nonEmpty) cmd.withTail(tail.get) + if (timestamps.nonEmpty) cmd.withTimestamps(timestamps.get) + + cmd.exec( + new ResultCallback[Frame]() { + override def onNext(item: Frame): Unit = q.offer(item) + override def onError(throwable: Throwable): Unit = {} + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + q + } + + + def logService(id: ServiceId, + details: Option[Boolean] = None, + follow: Option[Boolean] = None, + since: Option[Int] = None, + stdout: Option[Boolean] = None, + stderr: Option[Boolean] = None, + tail: Option[Int] = None, + timestamps: Option[Boolean] = None): UIO[Queue[Frame]] = + for { + c <- client + q <- Queue.unbounded[Frame] + } yield { + val cmd = c.logServiceCmd(id) + if (details.nonEmpty) cmd.withDetails(details.get) + if (follow.nonEmpty) cmd.withFollow(follow.get) + if (since.nonEmpty) cmd.withSince(since.get) + if (stderr.nonEmpty) cmd.withStderr(stderr.get) + if (stdout.nonEmpty) 
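+      // As with logContainer above, frames are pushed onto an unbounded ZIO Queue as they arrive;
+      // callers consume the service log by taking from the returned queue.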
cmd.withStdout(stdout.get) + if (tail.nonEmpty) cmd.withTail(tail.get) + if (timestamps.nonEmpty) cmd.withTimestamps(timestamps.get) + + cmd.exec( + new ResultCallback[Frame]() { + override def onNext(item: Frame): Unit = q.offer(item) + override def onError(throwable: Throwable): Unit = {} + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + q + } + + + def logTask(id: ServiceId, + details: Option[Boolean] = None, + follow: Option[Boolean] = None, + since: Option[Int] = None, + stdout: Option[Boolean] = None, + stderr: Option[Boolean] = None, + tail: Option[Int] = None, + timestamps: Option[Boolean] = None): UIO[Queue[Frame]] = + for { + c <- client + q <- Queue.unbounded[Frame] + } yield { + val cmd = c.logTaskCmd(id) + if (details.nonEmpty) cmd.withDetails(details.get) + if (follow.nonEmpty) cmd.withFollow(follow.get) + if (since.nonEmpty) cmd.withSince(since.get) + if (stderr.nonEmpty) cmd.withStderr(stderr.get) + if (stdout.nonEmpty) cmd.withStdout(stdout.get) + if (tail.nonEmpty) cmd.withTail(tail.get) + if (timestamps.nonEmpty) cmd.withTimestamps(timestamps.get) + + cmd.exec( + new ResultCallback[Frame]() { + override def onNext(item: Frame): Unit = q.offer(item) + override def onError(throwable: Throwable): Unit = {} + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + q + } + + + def pauseContainer(id: ContainerId): IO[NotFoundException, Unit] = + client.map(_.pauseContainerCmd(id).exec()) + + + def ping: UIO[Unit] = + client.map(_.pingCmd().exec()) + + + def prune(pruneType: PruneType, + dangling: Option[Boolean] = None, + labelFilter: List[String] = List(), + untilFilter: Option[String] = None): IO[NotFoundException, Long] = + client.map { c => + val cmd = c.pruneCmd(pruneType).withLabelFilter(labelFilter: _*) + if (dangling.nonEmpty) cmd.withDangling(dangling.get) + if (untilFilter.nonEmpty) cmd.withUntilFilter(untilFilter.get) + cmd.exec().getSpaceReclaimed + } + + + def pullImage(repository: String, + authConfig: Option[AuthConfig] = None, + platform: Option[String] = None, + registry: Option[String] = None, + tag: Option[String] = None): IO[DockerException, Unit] = + client.flatMap { c => + ZIO.async { cb => + val cmd = c.pullImageCmd(repository) + if (authConfig.nonEmpty) cmd.withAuthConfig(authConfig.get) + if (platform.nonEmpty) cmd.withPlatform(platform.get) + if (registry.nonEmpty) cmd.withRegistry(registry.get) + if (tag.nonEmpty) cmd.withTag(tag.get) + + cmd.exec( + new ResultCallback[PullResponseItem]() { + override def onNext(item: PullResponseItem): Unit = {} + override def onError(throwable: Throwable): Unit = cb(ZIO.fail(throwable.asInstanceOf[DockerException])) + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = cb(ZIO.unit) + override def close(): Unit = {} + } + ) + } + } + + + def pushImage(name: String, + authConfig: Option[AuthConfig] = None, + tag: Option[String] = None): IO[DockerException, Unit] = + client.flatMap { c => + ZIO.async { (cb: IO[DockerException, Unit] => Unit) => + val cmd = c.pushImageCmd(name) + if (authConfig.nonEmpty) cmd.withAuthConfig(authConfig.get) + if (tag.nonEmpty) cmd.withTag(tag.get) + + cmd.exec( + new ResultCallback[PushResponseItem]() { + override def onNext(item: PushResponseItem): Unit = {} + override def onError(throwable: Throwable): Unit = cb(ZIO.fail(throwable.asInstanceOf[DockerException])) + override def 
onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = cb(ZIO.unit) + override def close(): Unit = {} + } + ) + } + } + + + def removeContainer(id: ContainerId, force: Boolean = false, removeVolumes: Boolean = false): IO[NotFoundException, Unit] = + client.map(_.removeContainerCmd(id).withForce(force).withRemoveVolumes(removeVolumes).exec()) + + + def removeContainers(name: String, force: Boolean = false, removeVolumes: Boolean = false): IO[NotFoundException, Unit] = + for { + containers <- listContainers(nameFilter = List(name)) + _ <- ZIO.foreach(containers)(c => removeContainer(c.getId, force, removeVolumes)) + } yield () + + + def removeImage(id: ImageId, force: Boolean = false, prune: Boolean = true): IO[NotFoundException, Unit] = + client.map(_.removeImageCmd(id).withForce(force).withNoPrune(!prune).exec()) + + + def removeNetwork(id: NetworkId): IO[NotFoundException, Unit] = + client.map(_.removeNetworkCmd(id).exec()) + + + def removeService(id: ServiceId): IO[NotFoundException, Unit] = + client.map(_.removeServiceCmd(id).exec()) + + + def removeVolume(name: String): IO[NotFoundException, Unit] = + client.map(_.removeVolumeCmd(name).exec()) + + + def renameContainer(id: ContainerId, name: String): UIO[Unit] = + client.map(_.renameContainerCmd(id).withName(name).exec()) + + + def restartContainer(id: ContainerId, timeout: Option[Int] = None): IO[DockerException, Unit] = + client.map { c => + val cmd = c.restartContainerCmd(id) + if (timeout.nonEmpty) cmd.withTimeout(timeout.get) + cmd.exec() + } + + + def saveImage(name: String, tag: Option[String] = None): IO[NotFoundException, InputStream] = + client.map { c => + val cmd = c.saveImageCmd(name) + if (tag.nonEmpty) cmd.withTag(tag.get) + cmd.exec() + } + + + def searchImages(term: String): UIO[List[SearchItem]] = + client.map(_.searchImagesCmd(term).exec().asScala.toList) + + + def startContainer(id: ContainerId): IO[DockerException, Unit] = + client.map(_.startContainerCmd(id).exec()) + + + def startLocalRegistry: IO[DockerException, Unit] = + for { + _ <- logger.info("Starting Docker Registry") + _ <- ZIO.whenZIO(containerNotRunning("registry")) { + for { + _ <- logger.debug("Existing Docker Registry container not found. 
Starting a new one.") + _ <- pullImage(registryImage) + id <- createContainer("registry", registryImage, exposedPorts = Map(5000 -> 5000)) + _ <- startContainer(id) + } yield () + } + } yield () + + + def stats(id: ContainerId): IO[DockerException, Statistics] = + client.flatMap { c => + ZIO.async { (cb: IO[DockerException, Statistics] => Unit) => + c.statsCmd(id).exec( + new ResultCallback[Statistics]() { + override def onNext(item: Statistics): Unit = cb(ZIO.succeed(item)) + override def onError(throwable: Throwable): Unit = cb(ZIO.fail(throwable.asInstanceOf[DockerException])) + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + } + } + + + def stopContainer(id: ContainerId, timeout: Option[Int] = None): IO[DockerException, Unit] = + client.map { c => + val cmd = c.stopContainerCmd(id) + if (timeout.nonEmpty) cmd.withTimeout(timeout.get) + cmd.exec() + } + + + def stopLocalRegistry: IO[DockerException, Unit] = + for { + _ <- logger.info("Stopping Docker Registry") + containers <- listContainers(nameFilter = List("registry")) + _ <- ZIO.foreachDiscard(containers.map(_.getId))(id => stopContainer(id)) + } yield () + + def tagImage(id: ImageId, imageNameWithRepository: String, tag: String, force: Boolean = false): UIO[Unit] = + client.map(_.tagImageCmd(id, imageNameWithRepository, tag).withForce(force).exec()) + + + def topContainer(id: ContainerId, psArgs: Option[String] = None): IO[NotFoundException, TopContainerResponse] = + client.map { c => + val cmd = c.topContainerCmd(id) + if (psArgs.nonEmpty) cmd.withPsArgs(psArgs.get) + cmd.exec() + } + + + def unpauseContainer(id: ContainerId): IO[NotFoundException, Unit] = + client.map(_.unpauseContainerCmd(id).exec()) + + + def updateContainer(id: ContainerId, + blkioWeight: Option[Int] = None, + cpuPeriod: Option[Int] = None, + cpuQuota: Option[Int] = None, + cpusetCpus: Option[String] = None, + cpusetMems: Option[String] = None, + cpuShares: Option[Int] = None, + kernelMemory: Option[Long] = None, + memory: Option[Long] = None, + memoryReservation: Option[Long] = None, + memorySwap: Option[Long] = None): IO[NotFoundException, UpdateContainerResponse] = + client.map { c => + val cmd = c.updateContainerCmd(id) + if (blkioWeight.nonEmpty) cmd.withBlkioWeight(blkioWeight.get) + if (cpuPeriod.nonEmpty) cmd.withCpuPeriod(cpuPeriod.get) + if (cpuQuota.nonEmpty) cmd.withCpuQuota(cpuQuota.get) + if (cpusetCpus.nonEmpty) cmd.withCpusetCpus(cpusetCpus.get) + if (cpusetMems.nonEmpty) cmd.withCpusetMems(cpusetMems.get) + if (cpuShares.nonEmpty) cmd.withCpuShares(cpuShares.get) + if (kernelMemory.nonEmpty) cmd.withKernelMemory(kernelMemory.get) + if (memory.nonEmpty) cmd.withMemory(memory.get) + if (memoryReservation.nonEmpty) cmd.withMemoryReservation(memoryReservation.get) + if (memorySwap.nonEmpty) cmd.withMemorySwap(memorySwap.get) + cmd.exec() + } + + + def updateService(id: ServiceId, spec: ServiceSpec): UIO[Unit] = + client.map(_.updateServiceCmd(id, spec).exec()) + + + def updateSwarm(spec: SwarmSpec): UIO[Unit] = + client.map(_.updateSwarmCmd(spec).exec()) + + + def updateSwarmNode(id: SwarmId, spec: SwarmNodeSpec, version: Option[Long] = None): IO[NotFoundException, Unit] = + client.map { c => + val cmd = c.updateSwarmNodeCmd().withSwarmNodeId(id).withSwarmNodeSpec(spec) + if (version.nonEmpty) cmd.withVersion(version.get) + cmd.exec() + } + + + def version: UIO[Unit] = + client.map(_.versionCmd().exec()) + + + def waitForContainer(id: ContainerId): 
IO[DockerException, Int] = + client.flatMap { c => + ZIO.async { (cb: IO[DockerException, Int] => Unit) => + c.waitContainerCmd(id).exec( + new ResultCallback[WaitResponse]() { + override def onNext(item: WaitResponse): Unit = cb(ZIO.succeed(item.getStatusCode)) + override def onError(throwable: Throwable): Unit = cb(ZIO.fail(throwable.asInstanceOf[DockerException])) + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + } + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/docker/package.scala b/jvm/src/main/scala/com/harana/modules/docker/package.scala new file mode 100644 index 0000000..8925530 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/docker/package.scala @@ -0,0 +1,53 @@ +package com.harana.modules + +import io.circe.generic.JsonCodec + +package object docker { + + type ContainerId = String + type EventId = String + type ExecId = String + type ImageId = String + type NetworkId = String + type ServiceId = String + type SwarmId = String + type TaskId = String + + @JsonCodec + case class HubPage(count: Int, + next: Option[String], + previous: Option[String], + results: List[HubTag]) + + + @JsonCodec + case class HubTag(creator: Long, + id: Long, + images: List[HubImage] = List(), + last_updated: String, + last_updater: Long, + last_updater_username: String, + name: String, + repository: Long, + full_size: Long, + v2: Boolean, + tag_status: String, + tag_last_pulled: Option[String], + tag_last_pushed: Option[String], + media_type: String, + digest: String) + + @JsonCodec + case class HubImage(architecture: String, + features: String, + variant: Option[String], + digest: String, + os: String, + os_features: String, + os_version: Option[String], + size: Long, + status: String, + last_pulled: Option[String], + last_pushed: Option[String]) + +} diff --git a/jvm/src/main/scala/com/harana/modules/dremio/Dremio.scala b/jvm/src/main/scala/com/harana/modules/dremio/Dremio.scala new file mode 100644 index 0000000..0c8ec30 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/Dremio.scala @@ -0,0 +1,29 @@ +package com.harana.modules.dremio + +import com.harana.modules.dremio.models._ +import io.circe.Decoder +import zio.Task +import zio.macros.accessible + +@accessible +trait Dremio { + + def jobStatus(id: JobId): Task[JobStatus] + def jobResults(id: JobId, offset: Option[Int], limit: Option[Int]): Task[JobResults] + def cancelJob(id: JobId): Task[Unit] + + def getCatalog: Task[List[EntitySummary]] + def getCatalogEntity[E <: Entity](idOrPath: Either[EntityId, String])(implicit d: Decoder[E], m: Manifest[E]): Task[E] + def getCatalogEntityTags(id: EntityId): Task[List[String]] + def getCatalogEntityWiki(id: EntityId): Task[String] + + def updateCatalogEntity[E <: Entity](id: EntityId, entity: E): Task[Unit] + def updateCatalogEntityTags(id: EntityId, tags: List[String]): Task[Unit] + def updateCatalogEntityWiki(id: EntityId, text: String): Task[Unit] + def deleteCatalogEntity(id: EntityId): Task[Unit] + + def refreshCatalogEntity(id: EntityId): Task[Unit] + + def sql(sql: String): Task[JobId] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/LiveDremio.scala b/jvm/src/main/scala/com/harana/modules/dremio/LiveDremio.scala new file mode 100644 index 0000000..2f604e5 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/LiveDremio.scala @@ -0,0 +1,161 @@ +package com.harana.modules.dremio + +import 
com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.dremio.models._ +import io.circe.syntax._ +import io.circe.{Decoder, Json} +import zio.{Task, ZLayer, ZIO} + +object LiveDremio { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + http <- ZIO.service[Http] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveDremio(config, http, logger, micrometer) + } +} + +case class LiveDremio(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends Dremio { + + def jobStatus(id: JobId): Task[JobStatus] = + for { + _ <- logger.debug(s"Getting job status: $id") + response <- httpGet(s"/api/v3/job/$id") + jobStatus <- ZIO.fromTry(response.as[JobStatus].toTry) + } yield jobStatus + + + def jobResults(id: JobId, offset: Option[Int], limit: Option[Int]): Task[JobResults] = + for { + _ <- logger.debug(s"Getting job results: $id") + response <- httpGet(s"/api/v3/job/$id/results?offset=${offset.getOrElse("")}&limit=${limit.getOrElse("")}") + jobResults <- ZIO.fromTry(response.as[JobResults].toTry) + } yield jobResults + + + def cancelJob(id: JobId): Task[Unit] = + for { + _ <- logger.debug(s"Cancelling job: $id") + _ <- httpPost(s"/api/v3/job/$id/cancel", None) + } yield () + + + def getCatalog: Task[List[EntitySummary]] = + for { + _ <- logger.debug(s"Getting catalog") + response <- httpGet(s"/api/v3/catalog") + entities <- ZIO.fromTry(response.hcursor.downField("data").as[List[EntitySummary]].toTry) + } yield entities + + + def getCatalogEntity[E <: Entity](idOrPath: Either[EntityId, String])(implicit d: Decoder[E], m: Manifest[E]): Task[E] = + for { + _ <- logger.debug(s"Getting catalog entity: ${idOrPath.toString}") + url = idOrPath match { + case Left(u) => s"/api/v3/catalog/$u" + case Right(u) => s"/api/v3/catalog/by-path/$u" + } + json <- httpGet(url) + entity <- ZIO.fromTry(json.as[E].toTry) + } yield entity + + + def getCatalogEntityTags(id: EntityId): Task[List[String]] = + for { + _ <- logger.debug(s"Getting catalog entity tags: $id") + response <- httpGet(s"/api/v3/catalog/$id/collaboration/tag") + tags <- ZIO.fromTry(response.hcursor.downField("tags").as[List[String]].toTry) + } yield tags + + + def getCatalogEntityWiki(id: EntityId): Task[String] = + for { + _ <- logger.debug(s"Getting catalog entity wiki: $id") + response <- httpGet(s"/api/v3/catalog/$id/collaboration/wiki") + wiki <- ZIO.fromTry(response.hcursor.downField("text").as[String].toTry) + } yield wiki + + + def updateCatalogEntity[E <: Entity](id: EntityId, entity: E): Task[Unit] = + for { + _ <- logger.debug(s"Updating catalog entity: $id") + // FIXME: not implemented - updating the entity would need an Encoder[E] to send it back to the catalog API + } yield () + + + def updateCatalogEntityTags(id: EntityId, tags: List[String]): Task[Unit] = + for { + _ <- logger.debug(s"Updating catalog entity tags: $id") + body <- ZIO.attempt(Map("tags" -> tags).asJson.noSpaces) + _ <- httpPost(s"/api/v3/catalog/$id/collaboration/tag", Some(body)) + } yield () + + + def updateCatalogEntityWiki(id: EntityId, text: String): Task[Unit] = + for { + _ <- logger.debug(s"Updating catalog entity wiki: $id") + body <- ZIO.attempt(Map("text" -> text).asJson.noSpaces) + _ <- httpPost(s"/api/v3/catalog/$id/collaboration/wiki", Some(body)) + } yield () + + + def deleteCatalogEntity(id: EntityId): Task[Unit] = + for { + _ <- logger.debug(s"Deleting catalog entity: $id") + _ <- httpDelete(s"/api/v3/catalog/$id") + } yield () + + + def 
refreshCatalogEntity(id: EntityId): Task[Unit] = + for { + _ <- logger.debug(s"Refreshing catalog entity: $id") + _ <- httpPost(s"/api/v3/catalog/$id/refresh", None) + } yield () + + + def sql(sql: String): Task[JobId] = + for { + _ <- logger.debug(s"SQL query: $sql") + response <- httpPost("/api/v3/sql", Some(Map("sql" -> sql).asJson.noSpaces)) + jobId <- ZIO.fromTry(response.hcursor.downField("id").as[String].toTry) + } yield jobId + + + private def getToken: Task[String] = + for { + username <- config.secret("dremio-username") + password <- config.secret("dremio-password") + body <- ZIO.attempt(Map("username" -> username, "password" -> password).asJson.noSpaces) + response <- httpPost("/api/v2/login", Some(body)) + token <- ZIO.fromTry(response.hcursor.downField("token").as[String].toTry) + } yield s"_dremio{$token}" + + + private def httpGet(suffix: String): Task[Json] = + for { + token <- getToken + host <- config.secret("dremio-host") + response <- http.getAsJson(s"http://$host$suffix", credentials = Some((token, ""))).mapError(e => new Exception(e.toString)) + } yield response + + + private def httpDelete(suffix: String): Task[Json] = + for { + token <- getToken + host <- config.secret("dremio-host") + response <- http.deleteAsJson(s"http://$host$suffix", credentials = Some((token, ""))).mapError(e => new Exception(e.toString)) + } yield response + + + private def httpPost(suffix: String, body: Option[String]): Task[Json] = + for { + token <- getToken + host <- config.secret("dremio-host") + response <- http.postAsJson(s"http://$host$suffix", mimeType = Some("application/json"), body = body, credentials = Some((token, ""))).mapError(e => new Exception(e.toString)) + } yield response +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/AccelerationRefreshPolicy.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/AccelerationRefreshPolicy.scala new file mode 100644 index 0000000..4f14fd8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/AccelerationRefreshPolicy.scala @@ -0,0 +1,9 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class AccelerationRefreshPolicy(refreshPeriodMs: Long, + gracePeriodMs: Long, + method: AccelerationRefreshPolicyMethod, + refreshField: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/Catalog.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/Catalog.scala new file mode 100644 index 0000000..75be716 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/Catalog.scala @@ -0,0 +1,6 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Catalog(data: List[EntitySummary]) diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/Dataset.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/Dataset.scala new file mode 100644 index 0000000..4f726b2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/Dataset.scala @@ -0,0 +1,18 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Dataset(entityType: String = "dataset", + id: String, + path: String, + tag: String, + `type`: DatasetType, + fields: List[DatasetField], + createdAt: String, + accelerationRefreshPolicy: Option[AccelerationRefreshPolicy], + sql: Option[String], + sqlContext: String, +// FIXME +// format: DatasetFormat, + approximateStatisticsAllowed: Boolean) extends 
Entity diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetField.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetField.scala new file mode 100644 index 0000000..3c2d41e --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetField.scala @@ -0,0 +1,7 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class DatasetField(name: String, + `type`: DatasetFieldType) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetFieldType.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetFieldType.scala new file mode 100644 index 0000000..dccf169 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetFieldType.scala @@ -0,0 +1,9 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class DatasetFieldType(name: DatasetFieldName, + subSchema: DatasetField, + precision: Int, + scale: Int) diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetFormat.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetFormat.scala new file mode 100644 index 0000000..ca08d0d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetFormat.scala @@ -0,0 +1,34 @@ +package com.harana.modules.dremio.models + +import io.circe._ +import org.latestbit.circe.adt.codec._ + +// FIXME +//sealed trait DatasetFormat +//object DatasetFormat { +// +// implicit val encoder : Encoder[DatasetFormat] = JsonTaggedAdtCodec.createEncoder[DatasetFormat]("type") +// implicit val decoder : Decoder[DatasetFormat] = JsonTaggedAdtCodec.createDecoder[DatasetFormat]("type") +// +// case class Excel(sheetName: String, +// extractHeader: Boolean, +// hasMergedCells: Boolean) extends DatasetFormat +// +// case class JSON() extends DatasetFormat +// +// case class Parquet() extends DatasetFormat +// +// case class Text(fieldDelimiter: String, +// lineDelimiter: String, +// quote: String, +// comment: String, +// escape: String, +// skipFirstLine: Boolean, +// extractHeader: Boolean, +// trimHeader: Boolean, +// autoGenerateColumnNames: Boolean) extends DatasetFormat +// +// case class XLS(sheetName: String, +// extractHeader: Boolean, +// hasMergedCells: Boolean) extends DatasetFormat +//} diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/Entity.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/Entity.scala new file mode 100644 index 0000000..df7b12a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/Entity.scala @@ -0,0 +1,8 @@ +package com.harana.modules.dremio.models + +abstract class Entity { + val entityType: String + val id: String + val path: String + val tag: String +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/EntitySummary.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/EntitySummary.scala new file mode 100644 index 0000000..cec83fb --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/EntitySummary.scala @@ -0,0 +1,11 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class EntitySummary(id: String, + path: String, + tag: String, + `type`: String, + datasetType: String, + containerType: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/File.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/File.scala new file mode 100644 
index 0000000..e30c9f5 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/File.scala @@ -0,0 +1,9 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class File(entityType: String = "file", + id: String, + path: String, + tag: String) extends Entity \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/Folder.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/Folder.scala new file mode 100644 index 0000000..3097a9a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/Folder.scala @@ -0,0 +1,10 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Folder(entityType: String = "folder", + id: String, + path: String, + tag: String, + children: List[EntitySummary]) extends Entity \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/JobAccelerationRelationship.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/JobAccelerationRelationship.scala new file mode 100644 index 0000000..aac9ccc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/JobAccelerationRelationship.scala @@ -0,0 +1,8 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class JobAccelerationRelationship(reflectionId: String, + datasetId: String, + relationship: JobAccelerationRelationshipType) diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/JobAccelerationStatus.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/JobAccelerationStatus.scala new file mode 100644 index 0000000..9c05c19 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/JobAccelerationStatus.scala @@ -0,0 +1,6 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class JobAccelerationStatus(reflectionRelationships: List[JobAccelerationRelationship]) diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/JobFailure.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/JobFailure.scala new file mode 100644 index 0000000..5fa956a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/JobFailure.scala @@ -0,0 +1,7 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class JobFailure(errorMessage: String, + moreInfo: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/JobResults.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/JobResults.scala new file mode 100644 index 0000000..00d62d7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/JobResults.scala @@ -0,0 +1,9 @@ +package com.harana.modules.dremio.models + +import io.circe.Json +import io.circe.generic.JsonCodec + +@JsonCodec +case class JobResults(rowCount: Int, + schema: List[DatasetField], + rows: List[Json]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/JobStatus.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/JobStatus.scala new file mode 100644 index 0000000..7d94270 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/JobStatus.scala @@ -0,0 +1,12 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class JobStatus(jobState: JobStateJobQueryType, + queryType: JobQueryType, + startedAt: String, + endedAt: String, + rowCount: 
Option[Int], + acceleration: Option[JobAccelerationStatus], + errorMessage: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/Source.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/Source.scala new file mode 100644 index 0000000..7eb7dd1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/Source.scala @@ -0,0 +1,6 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Source() diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/SourceMetadataPolicy.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/SourceMetadataPolicy.scala new file mode 100644 index 0000000..1625e08 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/SourceMetadataPolicy.scala @@ -0,0 +1,10 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class SourceMetadataPolicy(authTTLMs: Long, + datasetRefreshAfterMs: Long, + datasetExpireAfterMs: Long, + namesRefreshMs: Long, + datasetUpdateMode: DatasetUpdateMode) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/Space.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/Space.scala new file mode 100644 index 0000000..344de41 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/Space.scala @@ -0,0 +1,11 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Space(entityType: String = "space", + id: String, + name: String, + tag: String, + path: String = "", + children: List[EntitySummary]) extends Entity \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/package.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/package.scala new file mode 100644 index 0000000..7271772 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/package.scala @@ -0,0 +1,150 @@ +package com.harana.modules.dremio + +import enumeratum.values._ +import io.circe.generic.JsonCodec + +package object models { + + type EntityId = String + type JobId = String + + sealed abstract class AWSElasticsearchAuthType(val value: String) extends StringEnumEntry + case object AWSElasticsearchAuthType extends StringEnum[AWSElasticsearchAuthType] with StringCirceEnum[AWSElasticsearchAuthType] { + case object AccessKey extends AWSElasticsearchAuthType("ACCESS_KEY") + case object EC2Metadata extends AWSElasticsearchAuthType("EC2_METADATA") + case object None extends AWSElasticsearchAuthType("NONE") + val values = findValues + } + + sealed abstract class AWSElasticsearchEncryptionValidationMode(val value: String) extends StringEnumEntry + case object AWSElasticsearchEncryptionValidationMode extends StringEnum[AWSElasticsearchEncryptionValidationMode] with StringCirceEnum[AWSElasticsearchEncryptionValidationMode] { + case object CertificateAndHostnameValidation extends AWSElasticsearchEncryptionValidationMode("CERTIFICATE_AND_HOSTNAME_VALIDATION") + case object CertificateOnlyValidation extends AWSElasticsearchEncryptionValidationMode("CERTIFICATE_ONLY_VALIDATION") + case object NoValidation extends AWSElasticsearchEncryptionValidationMode("NO_VALIDATION") + val values = findValues + } + + sealed abstract class StandardAuthType(val value: String) extends StringEnumEntry + case object StandardAuthType extends StringEnum[StandardAuthType] with StringCirceEnum[StandardAuthType] { + case object Anonymous extends 
StandardAuthType("ANONYMOUS") + case object Master extends StandardAuthType("MASTER") + val values = findValues + } + + sealed abstract class DatasetUpdateMode(val value: String) extends StringEnumEntry + case object DatasetUpdateMode extends StringEnum[DatasetUpdateMode] with StringCirceEnum[DatasetUpdateMode] { + case object Prefetch extends DatasetUpdateMode("PREFETCH") + case object PrefetchQueried extends DatasetUpdateMode("PREFETCH_QUERIED") + case object Inline extends DatasetUpdateMode("INLINE") + val values = findValues + } + + sealed abstract class ContainerType(val value: String) extends StringEnumEntry + case object ContainerType extends StringEnum[ContainerType] with StringCirceEnum[ContainerType] { + case object Home extends ContainerType("HOME") + case object Folder extends ContainerType("FOLDER") + case object Source extends ContainerType("SOURCE") + case object Space extends ContainerType("SPACE") + val values = findValues + } + + sealed abstract class DatasetType(val value: String) extends StringEnumEntry + case object DatasetType extends StringEnum[DatasetType] with StringCirceEnum[DatasetType] { + case object Physical extends DatasetType("PHYSICAL_DATASET") + case object Virtual extends DatasetType("VIRTUAL_DATASET") + val values = findValues + } + + sealed abstract class EntitySummaryType(val value: String) extends StringEnumEntry + case object EntitySummaryType extends StringEnum[EntitySummaryType] with StringCirceEnum[EntitySummaryType] { + case object Dataset extends EntitySummaryType("DATASET") + case object Container extends EntitySummaryType("CONTAINER") + case object File extends EntitySummaryType("FILE") + val values = findValues + } + + sealed abstract class EntitySummaryDatasetType(val value: String) extends StringEnumEntry + case object EntitySummaryDatasetType extends StringEnum[EntitySummaryDatasetType] with StringCirceEnum[EntitySummaryDatasetType] { + case object Virtual extends EntitySummaryDatasetType("VIRTUAL") + case object Promoted extends EntitySummaryDatasetType("PROMOTED") + case object Direct extends EntitySummaryDatasetType("DIRECT") + val values = findValues + } + + sealed abstract class AccelerationRefreshPolicyMethod(val value: String) extends StringEnumEntry + case object AccelerationRefreshPolicyMethod extends StringEnum[AccelerationRefreshPolicyMethod] with StringCirceEnum[AccelerationRefreshPolicyMethod] { + case object Full extends AccelerationRefreshPolicyMethod("FULL") + case object Incremental extends AccelerationRefreshPolicyMethod("INCREMENTAL") + val values = findValues + } + + sealed abstract class JobStateJobQueryType(val value: String) extends StringEnumEntry + case object JobStateJobQueryType extends StringEnum[JobStateJobQueryType] with StringCirceEnum[JobStateJobQueryType] { + case object Pending extends JobStateJobQueryType("PENDING") + case object MetadataRetrieval extends JobStateJobQueryType("METADATA_RETRIEVAL") + case object Planning extends JobStateJobQueryType("PLANNING") + case object Queued extends JobStateJobQueryType("QUEUED") + case object EngineStart extends JobStateJobQueryType("ENGINE_START") + case object ExecutionPlanning extends JobStateJobQueryType("EXECUTION_PLANNING") + case object Starting extends JobStateJobQueryType("STARTING") + case object Running extends JobStateJobQueryType("RUNNING") + case object Completed extends JobStateJobQueryType("COMPLETED") + case object Cancelled extends JobStateJobQueryType("CANCELED") + case object Failed extends JobStateJobQueryType("FAILED") + val values = findValues + } + 
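+ // Illustrative example only: with io.circe.syntax._ in scope, enumeratum's StringCirceEnum derives circe codecs keyed on each member's `value`, so these enums round-trip as plain JSON strings, e.g. + // (JobStateJobQueryType.Running: JobStateJobQueryType).asJson == Json.fromString("RUNNING") + // Json.fromString("FAILED").as[JobStateJobQueryType] == Right(JobStateJobQueryType.Failed) + 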
+ sealed abstract class JobQueryType(val value: String) extends StringEnumEntry + case object JobQueryType extends StringEnum[JobQueryType] with StringCirceEnum[JobQueryType] { + case object UIRun extends JobQueryType("UI_RUN") + case object UIPreview extends JobQueryType("UI_PREVIEW") + case object UIInternalPreview extends JobQueryType("UI_INTERNAL_PREVIEW") + case object UIInternalRun extends JobQueryType("UI_INTERNAL_RUN") + case object UIExport extends JobQueryType("UI_EXPORT") + case object ODBC extends JobQueryType("ODBC") + case object JDBC extends JobQueryType("JDBC") + case object REST extends JobQueryType("REST") + case object AcceleratorCreate extends JobQueryType("ACCELERATOR_CREATE") + case object AcceleratorDrop extends JobQueryType("ACCELERATOR_DROP") + case object Unknown extends JobQueryType("UNKNOWN") + case object PrepareInernal extends JobQueryType("PREPARE_INTERNAL") + case object AcceleratorExplain extends JobQueryType("ACCELERATOR_EXPLAIN") + case object UIInitialPreview extends JobQueryType("UI_INITIAL_PREVIEW") + val values = findValues + } + + sealed abstract class JobAccelerationRelationshipType(val value: String) extends StringEnumEntry + case object JobAccelerationRelationshipType extends StringEnum[JobAccelerationRelationshipType] with StringCirceEnum[JobAccelerationRelationshipType] { + case object Considered extends JobAccelerationRelationshipType("CONSIDERED") + case object Matched extends JobAccelerationRelationshipType("MATCHED") + case object Chosen extends JobAccelerationRelationshipType("CHOSEN") + val values = findValues + } + + sealed abstract class DatasetFieldName(val value: String) extends StringEnumEntry + case object DatasetFieldName extends StringEnum[DatasetFieldName] with StringCirceEnum[DatasetFieldName] { + case object Struct extends DatasetFieldName("STRUCT") + case object List extends DatasetFieldName("LIST") + case object Union extends DatasetFieldName("UNION") + case object Integer extends DatasetFieldName("INTEGER") + case object Bigint extends DatasetFieldName("BIGINT") + case object Float extends DatasetFieldName("FLOAT") + case object Double extends DatasetFieldName("DOUBLE") + case object Varchar extends DatasetFieldName("VARCHAR") + case object Varbinary extends DatasetFieldName("VARBINARY") + case object Boolean extends DatasetFieldName("BOOLEAN") + case object Decimal extends DatasetFieldName("DECIMAL") + case object Time extends DatasetFieldName("TIME") + case object Date extends DatasetFieldName("DATE") + case object Timestamp extends DatasetFieldName("TIMESTAMP") + case object IntervalDayToSecond extends DatasetFieldName("INTERVAL DAY TO SECOND") + case object IntervalDayToMonth extends DatasetFieldName("INTERVAL YEAR TO MONTH") + val values = findValues + } + + @JsonCodec + case class Host(hostname: String, port: Int) + + @JsonCodec + case class Property(name: String, value: String) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSElasticsearch.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSElasticsearch.scala new file mode 100644 index 0000000..238aa53 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSElasticsearch.scala @@ -0,0 +1,23 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.{AWSElasticsearchAuthType, AWSElasticsearchEncryptionValidationMode} +import io.circe.generic.JsonCodec + +@JsonCodec +case class AWSElasticsearch(hostname: String, + port: Int, + 
authenticationType: AWSElasticsearchAuthType, + accessKey: String, + accessSecret: String, + overwriteRegion: Boolean, + regionName: String, + scriptsEnabled: Boolean, + showHiddenIndices: Boolean, + showIdColumn: Boolean, + readTimeoutMillis: Long, + scrollTimeoutMillis: Long, + usePainless: Boolean, + scrollSize: Int, + allowPushdownOnNormalizedOrAnalyzedFields: Boolean, + warnOnRowCountMismatch: Boolean, + encryptionValidationMode: AWSElasticsearchEncryptionValidationMode) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSRedshift.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSRedshift.scala new file mode 100644 index 0000000..c2f8755 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSRedshift.scala @@ -0,0 +1,11 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.StandardAuthType +import io.circe.generic.JsonCodec + +@JsonCodec +case class AWSRedshift(username: String, + password: String, + authenticationType: StandardAuthType, + fetchSize: Int, + connectionString: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSS3.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSS3.scala new file mode 100644 index 0000000..3a3fbb8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSS3.scala @@ -0,0 +1,11 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.Property +import io.circe.generic.JsonCodec + +@JsonCodec +case class AWSS3(accessKey: String, + accessSecret: String, + secure: Boolean, + externalBucketList: List[String], + propertyList: List[Property]) diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AzureDataLakeStorage.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AzureDataLakeStorage.scala new file mode 100644 index 0000000..20957bc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AzureDataLakeStorage.scala @@ -0,0 +1,10 @@ +package com.harana.modules.dremio.models.sources + +import io.circe.generic.JsonCodec + +@JsonCodec +case class AzureDataLakeStorage(mode: String = "CLIENT_KEY", + accountName: String, + clientId: String, + clientKeyRefreshUrl: String, + clientKeyPassword: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AzureStorage.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AzureStorage.scala new file mode 100644 index 0000000..821abfd --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AzureStorage.scala @@ -0,0 +1,13 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.Property +import io.circe.generic.JsonCodec + +@JsonCodec +case class AzureStorage(accountKind: String, + accountName: String, + accessKey: String, + enableSSL: Boolean, + rootPath: String, + containers: List[String], + propertyList: List[Property]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Elasticsearch.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Elasticsearch.scala new file mode 100644 index 0000000..18d5752 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Elasticsearch.scala @@ -0,0 +1,19 @@ +package com.harana.modules.dremio.models.sources + +import 
com.harana.modules.dremio.models.{Host, StandardAuthType} +import io.circe.generic.JsonCodec + +@JsonCodec +case class Elasticsearch(username: String, + password: String, + hostList: List[Host], + authenticationType: StandardAuthType, + scriptsEnabled: Option[Boolean], + showHiddenIndices: Option[Boolean], + sslEnabled: Option[Boolean], + showIdColumn: Option[Boolean], + readTimeoutMillis: Option[Long], + scrollTimeoutMillis: Option[Long], + usePainless: Option[Boolean], + useWhitelist: Option[Boolean], + scrollSize: Option[Int]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/HDFS.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/HDFS.scala new file mode 100644 index 0000000..743ed2a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/HDFS.scala @@ -0,0 +1,11 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.Property +import io.circe.generic.JsonCodec + +@JsonCodec +case class HDFS(hostname: String, + port: String, + kerberosPrincipal: String, + enableSasl: Option[Boolean], + propertyList: List[Property]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Hive.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Hive.scala new file mode 100644 index 0000000..d47e317 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Hive.scala @@ -0,0 +1,11 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.Property +import io.circe.generic.JsonCodec + +@JsonCodec +case class Hive(hostname: String, + port: String, + kerberosPrincipal: String, + enableSasl: Option[Boolean], + propertyList: List[Property]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/MongoDB.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/MongoDB.scala new file mode 100644 index 0000000..c3ae66d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/MongoDB.scala @@ -0,0 +1,16 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.{Host, Property, StandardAuthType} +import io.circe.generic.JsonCodec + +@JsonCodec +case class MongoDB(username: String, + password: String, + hostList: List[Host], + useSsl: Boolean, + authenticationType: StandardAuthType, + authDatabase: String, + authenticationTimeoutMillis: Long, + secondaryReadsOnly: Boolean, + subpartitionSize: Int, + propertyList: List[Property]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/MySQL.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/MySQL.scala new file mode 100644 index 0000000..1ed551b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/MySQL.scala @@ -0,0 +1,12 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.StandardAuthType +import io.circe.generic.JsonCodec + +@JsonCodec +case class MySQL(username: String, + password: String, + hostname: String, + port: String, + authenticationType: StandardAuthType, + fetchSize: Int) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Oracle.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Oracle.scala new file mode 100644 index 0000000..2ac1e1d --- /dev/null +++ 
b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Oracle.scala @@ -0,0 +1,13 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.StandardAuthType +import io.circe.generic.JsonCodec + +@JsonCodec +case class Oracle(username: String, + password: String, + instance: String, + hostname: String, + port: String, + authenticationType: StandardAuthType, + fetchSize: Int) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/PostgreSQL.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/PostgreSQL.scala new file mode 100644 index 0000000..0402e0b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/PostgreSQL.scala @@ -0,0 +1,13 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.StandardAuthType +import io.circe.generic.JsonCodec + +@JsonCodec +case class PostgreSQL(username: String, + password: String, + hostname: String, + port: String, + authenticationType: StandardAuthType, + fetchSize: Int, + databaseName: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/SQLServer.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/SQLServer.scala new file mode 100644 index 0000000..b1b51b8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/SQLServer.scala @@ -0,0 +1,14 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.StandardAuthType +import io.circe.generic.JsonCodec + +@JsonCodec +case class SQLServer(username: String, + password: String, + hostname: String, + port: String, + authenticationType: StandardAuthType, + fetchSize: Int, + database: Option[String], + showOnlyConnectiondatabase: Option[Boolean]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Snowflake.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Snowflake.scala new file mode 100644 index 0000000..09f4c4a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Snowflake.scala @@ -0,0 +1,10 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.Property +import io.circe.generic.JsonCodec + +@JsonCodec +case class Snowflake(account: String, + username: String, + password: String, + propertyList: List[Property]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/email/Email.scala b/jvm/src/main/scala/com/harana/modules/email/Email.scala new file mode 100644 index 0000000..3c6562b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/email/Email.scala @@ -0,0 +1,18 @@ +package com.harana.modules.email + +import org.apache.commons.mail.EmailException +import zio.IO +import zio.macros.accessible + +@accessible +trait Email { + + def isValid(email: String): Boolean + + def domain(email: String): String + + def obfuscate(email: String): String + + def send(message: EmailMessage): IO[EmailException, String] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/email/LiveEmail.scala b/jvm/src/main/scala/com/harana/modules/email/LiveEmail.scala new file mode 100644 index 0000000..6a039dc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/email/LiveEmail.scala @@ -0,0 +1,78 @@ +package com.harana.modules.email + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import 
com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.email.models.{EmailAddress => hrmcEmailAddress} +import org.apache.commons.mail.{EmailAttachment, EmailException, HtmlEmail, MultiPartEmail, SimpleEmail} +import zio.{IO, ZIO, ZLayer} + +import javax.mail.internet.InternetAddress +import scala.jdk.CollectionConverters._ + +object LiveEmail { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveEmail(config, logger, micrometer) + } +} + +case class LiveEmail(config: Config, logger: Logger, micrometer: Micrometer) extends Email { + + def isValid(email: String): Boolean = + hrmcEmailAddress.isValid(email) + + def domain(email: String): String = + hrmcEmailAddress.Domain(email) + + def obfuscate(email: String): String = + hrmcEmailAddress(email).obfuscated.value + + def send(message: EmailMessage): IO[EmailException, String] = { + + val format = + if (message.attachments.nonEmpty) MultiPart + else if (message.richMessage.nonEmpty) Rich + else Plain + + val commonsMail = format match { + case Plain => new SimpleEmail().setMsg(message.message) + case Rich => new HtmlEmail().setHtmlMsg(message.richMessage.get).setTextMsg(message.message) + case MultiPart => + val multipartEmail = new MultiPartEmail() + message.attachments.foreach { file => + val attachment = new EmailAttachment() + attachment.setPath(file.getAbsolutePath) + attachment.setDisposition(EmailAttachment.ATTACHMENT) + attachment.setName(file.getName) + multipartEmail.attach(attachment) + } + multipartEmail.setMsg(message.message) + } + + message.to.foreach(ea => commonsMail.addTo(ea)) + message.cc.foreach(cc => commonsMail.addCc(cc)) + message.bcc.foreach(bcc => commonsMail.addBcc(bcc)) + + for { + host <- config.secret("email-host") + auth <- config.boolean("email.useAuthentication", default = false) + username <- config.secret("email-username") + password <- config.secret("email-password") + ssl <- config.boolean("email.useSSL", default = true) + port <- config.int("email.port", if (ssl) 25 else 587) + } yield { + commonsMail.setHostName(host) + if (auth) commonsMail.setAuthentication(username, password) + commonsMail.setSSLOnConnect(ssl) + commonsMail.setSmtpPort(port) + commonsMail.setFrom(message.from._1, message.from._2) + commonsMail.setSubject(message.subject) + if (message.replyTo.nonEmpty) commonsMail.setReplyTo(List(new InternetAddress(message.replyTo.get)).asJava) + commonsMail.send() + } + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/email/models/EmailAddress.scala b/jvm/src/main/scala/com/harana/modules/email/models/EmailAddress.scala new file mode 100644 index 0000000..5083783 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/email/models/EmailAddress.scala @@ -0,0 +1,29 @@ +package com.harana.modules.email.models + +case class EmailAddress(value: String) extends StringValue { + + val (mailbox, domain): (EmailAddress.Mailbox, EmailAddress.Domain) = value match { + case EmailAddress.validEmail(m, d) => (EmailAddress.Mailbox(m), EmailAddress.Domain(d)) + case invalidEmail => throw new IllegalArgumentException(s"'$invalidEmail' is not a valid email address") + } + + lazy val obfuscated = ObfuscatedEmailAddress.apply(value) +} + +object EmailAddress { + final val validDomain = """^([a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*)$""".r + final val validEmail = """^([a-zA-Z0-9.!#$%&’'*+/=?^_`{|}~-]+)@([a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*)$""".r + + def isValid(email: String) = 
email match { + case validEmail(_,_) => true + case invalidEmail => false + } + + case class Mailbox private[EmailAddress] (value: String) extends StringValue + case class Domain(value: String) extends StringValue { + value match { + case EmailAddress.validDomain(_) => // + case invalidDomain => throw new IllegalArgumentException(s"'$invalidDomain' is not a valid email domain") + } + } +} diff --git a/jvm/src/main/scala/com/harana/modules/email/models/ObfuscatedEmailAddress.scala b/jvm/src/main/scala/com/harana/modules/email/models/ObfuscatedEmailAddress.scala new file mode 100644 index 0000000..b68fce7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/email/models/ObfuscatedEmailAddress.scala @@ -0,0 +1,30 @@ +package com.harana.modules.email.models + +trait ObfuscatedEmailAddress { + val value: String + override def toString: String = value +} + +object ObfuscatedEmailAddress { + final val shortMailbox = "(.{1,2})".r + final val longMailbox = "(.)(.*)(.)".r + + import EmailAddress.validEmail + + implicit def obfuscatedEmailToString(e: ObfuscatedEmailAddress): String = e.value + + def apply(plainEmailAddress: String): ObfuscatedEmailAddress = new ObfuscatedEmailAddress { + val value = plainEmailAddress match { + case validEmail(shortMailbox(m), domain) => + s"${obscure(m)}@$domain" + + case validEmail(longMailbox(firstLetter,middle,lastLetter), domain) => + s"$firstLetter${obscure(middle)}$lastLetter@$domain" + + case invalidEmail => + throw new IllegalArgumentException(s"Cannot obfuscate invalid email address '$invalidEmail'") + } + } + + private def obscure(text: String) = "*" * text.length +} diff --git a/jvm/src/main/scala/com/harana/modules/email/models/StringValue.scala b/jvm/src/main/scala/com/harana/modules/email/models/StringValue.scala new file mode 100644 index 0000000..d6534a2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/email/models/StringValue.scala @@ -0,0 +1,11 @@ +package com.harana.modules.email.models + +object StringValue { + import scala.language.implicitConversions + implicit def stringValueToString(e: StringValue): String = e.value +} + +trait StringValue { + def value: String + override def toString: String = value +} diff --git a/jvm/src/main/scala/com/harana/modules/email/package.scala b/jvm/src/main/scala/com/harana/modules/email/package.scala new file mode 100644 index 0000000..e31a593 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/email/package.scala @@ -0,0 +1,23 @@ +package com.harana.modules + +import com.harana.modules.email.models.EmailAddress + +import java.io.File + +package object email { + + sealed abstract class MailType + case object Plain extends MailType + case object Rich extends MailType + case object MultiPart extends MailType + + case class EmailMessage(from: (EmailAddress, String), + replyTo: Option[EmailAddress] = None, + to: List[EmailAddress], + cc: List[EmailAddress] = List(), + bcc: List[EmailAddress] = List(), + subject: String, + message: String, + richMessage: Option[String] = None, + attachments: List[File] = List()) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/facebook/Facebook.scala b/jvm/src/main/scala/com/harana/modules/facebook/Facebook.scala new file mode 100644 index 0000000..1cc3190 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/facebook/Facebook.scala @@ -0,0 +1,188 @@ +package com.harana.modules.facebook + +import com.facebook.ads.sdk.Campaign.{EnumBidStrategy, EnumObjective, EnumSpecialAdCategory} +import com.facebook.ads.sdk._ +import zio.Task 
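+// Note: the @accessible annotation below (zio.macros) generates companion-object accessors of roughly the shape +// Facebook.ads(adAccountId): ZIO[Facebook, Throwable, List[Ad]] (approximate signature), +// so callers can invoke the service through the ZIO environment rather than threading the trait by hand. 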
+import zio.macros.accessible + +@accessible +trait Facebook { + + def createAd(adAccountId: String, + adLabels: Option[String] = None, + adsetId: Option[String] = None, + adsetSpec: Option[AdSet] = None, + audienceId: Option[String] = None, + bidAmount: Option[Long] = None, + creative: Option[AdCreative] = None, + dateFormat: Option[String] = None, + displaySequence: Option[Long] = None, + draftAdgroupId: Option[String] = None, + engagementAudience: Option[Boolean] = None, + executionOptions: List[Ad.EnumExecutionOptions] = List(), + includeDemoLinkHashes: Option[Boolean] = None, + name: Option[String] = None, + priority: Option[Long] = None, + sourceAdId: Option[String] = None, + status: Option[Ad.EnumStatus] = None, + trackingSpecs: Option[String] = None): Task[Ad] + + def createCampaign(adAccountId: String, + adLabels: Option[String] = None, + bidStrategy: Option[EnumBidStrategy] = None, + budgetRebalanceFlag: Option[Boolean] = None, + buyingType: Option[String] = None, + dailyBudget: Option[Long] = None, + executionOptions: List[Campaign.EnumExecutionOptions] = List(), + iterativeSplitTestConfigs: Option[String] = None, + lifetimeBudget: Option[Long] = None, + name: Option[String] = None, + objective: Option[EnumObjective] = None, + pacingTypes: List[String] = List(), + promotedObject: Option[String] = None, + sourceCampaignId: Option[String] = None, + specialAdCategory: Option[EnumSpecialAdCategory] = None, + spendCap: Option[Long] = None, + status: Option[Campaign.EnumStatus] = None, + topLineId: Option[String] = None, + upstreamEvents: Map[String, String] = Map()): Task[Campaign] + + def adAccount(adAccountId: String): Task[AdAccount] + + def adActivities(adAccountId: String): Task[List[AdActivity]] + + def adCreatives(adAccountId: String): Task[List[AdCreative]] + + def adCreativesByLabels(adAccountId: String): Task[List[AdCreative]] + + def adImages(adAccountId: String): Task[List[AdImage]] + + def adLabels(adAccountId: String): Task[List[AdLabel]] + + def adPlacePageSets(adAccountId: String): Task[List[AdPlacePageSet]] + + def adPlayables(adAccountId: String): Task[List[PlayableContent]] + + def adRulesHistory(adAccountId: String): Task[List[AdAccountAdRulesHistory]] + + def adRulesLibrary(adAccountId: String): Task[List[AdRule]] + + def ads(adAccountId: String): Task[List[Ad]] + + def adsByLabels(adAccountId: String): Task[List[Ad]] + + def adSets(adAccountId: String): Task[List[AdSet]] + + def adSetsByLabels(adAccountId: String): Task[List[AdSet]] + + def adPixels(adAccountId: String): Task[List[AdsPixel]] + + def adStudies(adAccountId: String): Task[List[AdStudy]] + + def adVolume(adAccountId: String): Task[List[AdAccountAdVolume]] + + def adAdvertisableApplications(adAccountId: String): Task[List[Application]] + + def adAffectedAdSets(adAccountId: String): Task[List[AdSet]] + + def adAgencies(adAccountId: String): Task[List[Business]] + + def adApplications(adAccountId: String): Task[List[Application]] + + def adAssignedUsers(adAccountId: String): Task[List[AssignedUser]] + + def adAsyncRequests(adAccountId: String): Task[List[AsyncRequest]] + + def adCampaigns(adAccountId: String): Task[List[Campaign]] + + def adCampaignsByLabels(adAccountId: String): Task[List[Campaign]] + + def adCustomAudiences(adAccountId: String): Task[List[CustomAudience]] + + def adCustomAudiencesTOS(adAccountId: String): Task[List[CustomAudiencesTOS]] + + def adCustomConversions(adAccountId: String): Task[List[CustomConversion]] + + def adDeliveryEstimate(adAccountId: String): 
Task[List[AdAccountDeliveryEstimate]] + + def adDeprecatedTargetingAdSets(adAccountId: String): Task[List[AdSet]] + + def adImpactingAdStudies(adAccountId: String): Task[List[AdStudy]] + + def adInsights(adAccountId: String): Task[List[AdsInsights]] + + def adInstagramAccounts(adAccountId: String): Task[List[InstagramUser]] + + def adMatchedSearchApplications(adAccountId: String): Task[List[AdAccountMatchedSearchApplicationsEdgeData]] + + def adMaxBid(adAccountId: String): Task[List[AdAccountMaxBid]] + + def adMinimumBudgets(adAccountId: String): Task[List[MinimumBudget]] + + def adOfflineConversionDataSets(adAccountId: String): Task[List[OfflineConversionDataSet]] + + def adOnBehalfRequests(adAccountId: String): Task[List[BusinessOwnedObjectOnBehalfOfRequest]] + + def adPromotePages(adAccountId: String): Task[List[Page]] + + def adPublisherBlockLists(adAccountId: String): Task[List[PublisherBlockList]] + + def adReachEstimate(adAccountId: String): Task[List[AdAccountReachEstimate]] + + def adReachFrequencyPredictions(adAccountId: String): Task[List[ReachFrequencyPrediction]] + + def adSavedAudiences(adAccountId: String): Task[List[SavedAudience]] + + def adSubscribedApps(adAccountId: String): Task[List[AdAccountSubscribedApps]] + + def adTargetingBrowse(adAccountId: String): Task[List[AdAccountTargetingUnified]] + + def adTargetingSearch(adAccountId: String): Task[List[AdAccountTargetingUnified]] + + def adTargetingSentenceLines(adAccountId: String): Task[List[TargetingSentenceLine]] + + def adTargetingSuggestions(adAccountId: String): Task[List[AdAccountTargetingUnified]] + + def adTargetingValidation(adAccountId: String): Task[List[AdAccountTargetingUnified]] + + def adTracking(adAccountId: String): Task[List[AdAccountTrackingData]] + + def adUsers(adAccountId: String): Task[List[AdAccountUser]] + + def userAccounts(userId: String): Task[List[Page]] + + def userAdAccounts(userId: String): Task[List[AdAccount]] + + def userAdStudies(userId: String): Task[List[AdStudy]] + + def userAlbums(userId: String): Task[List[Album]] + + def userAppRequests(userId: String): Task[List[AppRequest]] + + def userAssignedAdAccounts(userId: String): Task[List[AdAccount]] + + def userAssignedBusinessAssetGroups(userId: String): Task[List[BusinessAssetGroup]] + + def userAssignedPages(userId: String): Task[List[Page]] + + def userAssignedProductCatalogs(userId: String): Task[List[ProductCatalog]] + + def userBusinesses(userId: String): Task[List[Business]] + + def userBusinessUsers(userId: String): Task[List[BusinessUser]] + + def userConversations(userId: String): Task[List[UnifiedThread]] + + def userCustomLabels(userId: String): Task[List[PageUserMessageThreadLabel]] + + def userEvents(userId: String): Task[List[Event]] + + def userFriends(userId: String): Task[List[User]] + + def userAdVideos(userId: String): Task[List[AdVideo]] + + def video(videoId: String): Task[AdVideo] + + def videoThumbnails(videoId: String): Task[List[VideoThumbnail]] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/facebook/LiveFacebook.scala b/jvm/src/main/scala/com/harana/modules/facebook/LiveFacebook.scala new file mode 100644 index 0000000..ea34cd5 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/facebook/LiveFacebook.scala @@ -0,0 +1,350 @@ +package com.harana.modules.facebook + +import com.facebook.ads.sdk.Campaign.{EnumBidStrategy, EnumObjective, EnumSpecialAdCategory} +import com.facebook.ads.sdk._ +import com.harana.modules.core.config.Config +import 
com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import zio.ZIO.attemptBlocking +import zio.{Task, ZIO, ZLayer} + +import scala.jdk.CollectionConverters._ + +object LiveFacebook { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveFacebook(config, logger, micrometer) + } +} + +case class LiveFacebook(config: Config, logger: Logger, micrometer: Micrometer) extends Facebook { + + private val apiContext = for { + accessToken <- config.secret("facebook-access-token") + appSecret <- config.secret("facebook-app-secret") + } yield { + new APIContext(accessToken, appSecret) + } + + def createAd(adAccountId: String, + adLabels: Option[String] = None, + adsetId: Option[String] = None, + adsetSpec: Option[AdSet] = None, + audienceId: Option[String] = None, + bidAmount: Option[Long] = None, + creative: Option[AdCreative] = None, + dateFormat: Option[String] = None, + displaySequence: Option[Long] = None, + draftAdgroupId: Option[String] = None, + engagementAudience: Option[Boolean] = None, + executionOptions: List[Ad.EnumExecutionOptions] = List(), + includeDemoLinkHashes: Option[Boolean] = None, + name: Option[String] = None, + priority: Option[Long] = None, + sourceAdId: Option[String] = None, + status: Option[Ad.EnumStatus] = None, + trackingSpecs: Option[String] = None): Task[Ad] = + for { + ac <- apiContext + ad <- ZIO.attempt { + var ad = new AdAccount(adAccountId, ac).createAd() + if (adLabels.nonEmpty) ad.setAdlabels(adLabels.get) + if (adsetId.nonEmpty) ad.setAdsetId(adsetId.get) + if (adsetSpec.nonEmpty) ad.setAdsetSpec(adsetSpec.get) + if (audienceId.nonEmpty) ad.setAudienceId(audienceId.get) + if (bidAmount.nonEmpty) ad.setBidAmount(bidAmount.get) + if (creative.nonEmpty) ad.setCreative(creative.get) + if (dateFormat.nonEmpty) ad.setDateFormat(dateFormat.get) + if (displaySequence.nonEmpty) ad.setDisplaySequence(displaySequence.get) + if (draftAdgroupId.nonEmpty) ad.setDraftAdgroupId(draftAdgroupId.get) + if (engagementAudience.nonEmpty) ad.setEngagementAudience(engagementAudience.get) + ad.setExecutionOptions(executionOptions.asJava) + if (includeDemoLinkHashes.nonEmpty) ad.setIncludeDemolinkHashes(includeDemoLinkHashes.get) + if (name.nonEmpty) ad.setName(name.get) + if (priority.nonEmpty) ad.setPriority(priority.get) + if (sourceAdId.nonEmpty) ad.setSourceAdId(sourceAdId.get) + if (status.nonEmpty) ad.setStatus(status.get) + if (trackingSpecs.nonEmpty) ad.setTrackingSpecs(trackingSpecs.get) + + ad.execute() + } + } yield ad + + def createCampaign(adAccountId: String, + adLabels: Option[String] = None, + bidStrategy: Option[EnumBidStrategy] = None, + budgetRebalanceFlag: Option[Boolean] = None, + buyingType: Option[String] = None, + dailyBudget: Option[Long] = None, + executionOptions: List[Campaign.EnumExecutionOptions] = List(), + iterativeSplitTestConfigs: Option[String] = None, + lifetimeBudget: Option[Long] = None, + name: Option[String] = None, + objective: Option[EnumObjective] = None, + pacingTypes: List[String] = List(), + promotedObject: Option[String] = None, + sourceCampaignId: Option[String] = None, + specialAdCategory: Option[EnumSpecialAdCategory] = None, + spendCap: Option[Long] = None, + status: Option[Campaign.EnumStatus] = None, + topLineId: Option[String] = None, + upstreamEvents: Map[String, String] = Map()): Task[Campaign] = + for { + ac <- apiContext + campaign <- ZIO.attempt { + var campaign = new AdAccount(adAccountId, 
ac).createCampaign() + if (adLabels.nonEmpty) campaign = campaign.setAdlabels(adLabels.get) + if (bidStrategy.nonEmpty) campaign = campaign.setBidStrategy(bidStrategy.get) + if (buyingType.nonEmpty) campaign.setBuyingType(buyingType.get) + if (dailyBudget.nonEmpty) campaign.setDailyBudget(dailyBudget.get) + campaign.setExecutionOptions(executionOptions.asJava) + if (iterativeSplitTestConfigs.nonEmpty) campaign.setIterativeSplitTestConfigs(iterativeSplitTestConfigs.get) + if (lifetimeBudget.nonEmpty) campaign.setLifetimeBudget(lifetimeBudget.get) + if (name.nonEmpty) campaign.setName(name.get) + if (objective.nonEmpty) campaign.setObjective(objective.get) + campaign.setPacingType(pacingTypes.asJava) + if (promotedObject.nonEmpty) campaign.setPromotedObject(promotedObject.get) + if (sourceCampaignId.nonEmpty) campaign.setSourceCampaignId(sourceCampaignId.get) + if (spendCap.nonEmpty) campaign.setSpendCap(spendCap.get) + if (status.nonEmpty) campaign.setStatus(status.get) + if (topLineId.nonEmpty) campaign.setToplineId(topLineId.get) + campaign.setUpstreamEvents(upstreamEvents.asJava) + campaign.execute() + } + } yield campaign + + def adAccount(adAccountId: String): Task[AdAccount] = + for { + ac <- apiContext + pages <- ZIO.attempt(new AdAccount(adAccountId, ac).get.requestAllFields.execute()) + } yield pages + + def adActivities(adAccountId: String): Task[List[AdActivity]] = + adAccount(adAccountId, _.getActivities.requestAllFields.execute()) + + def adCreatives(adAccountId: String): Task[List[AdCreative]] = + adAccount(adAccountId, _.getAdCreatives.requestAllFields.execute()) + + def adCreativesByLabels(adAccountId: String): Task[List[AdCreative]] = + adAccount(adAccountId, _.getAdCreativesByLabels.requestAllFields.execute()) + + def adImages(adAccountId: String): Task[List[AdImage]] = + adAccount(adAccountId, _.getAdImages.requestAllFields.execute()) + + def adLabels(adAccountId: String): Task[List[AdLabel]] = + adAccount(adAccountId, _.getAdLabels.requestAllFields.execute()) + + def adPlacePageSets(adAccountId: String): Task[List[AdPlacePageSet]] = + adAccount(adAccountId, _.getAdPlacePageSets.requestAllFields.execute()) + + def adPlayables(adAccountId: String): Task[List[PlayableContent]] = + adAccount(adAccountId, _.getAdPlayables.requestAllFields.execute()) + + def adRulesHistory(adAccountId: String): Task[List[AdAccountAdRulesHistory]] = + adAccount(adAccountId, _.getAdRulesHistory.requestAllFields.execute()) + + def adRulesLibrary(adAccountId: String): Task[List[AdRule]] = + adAccount(adAccountId, _.getAdRulesLibrary.requestAllFields.execute()) + + def ads(adAccountId: String): Task[List[Ad]] = + adAccount(adAccountId, _.getAds.requestAllFields.execute()) + + def adsByLabels(adAccountId: String): Task[List[Ad]] = + adAccount(adAccountId, _.getAdsByLabels.requestAllFields.execute()) + + def adSets(adAccountId: String): Task[List[AdSet]] = + adAccount(adAccountId, _.getAdSets.requestAllFields.execute()) + + def adSetsByLabels(adAccountId: String): Task[List[AdSet]] = + adAccount(adAccountId, _.getAdSetsByLabels.requestAllFields.execute()) + + def adPixels(adAccountId: String): Task[List[AdsPixel]] = + adAccount(adAccountId, _.getAdsPixels.requestAllFields.execute()) + + def adStudies(adAccountId: String): Task[List[AdStudy]] = + adAccount(adAccountId, _.getAdStudies.requestAllFields.execute()) + + def adVolume(adAccountId: String): Task[List[AdAccountAdVolume]] = + adAccount(adAccountId, _.getAdsVolume.requestAllFields.execute()) + + def adAdvertisableApplications(adAccountId: 
String): Task[List[Application]] = + adAccount(adAccountId, _.getAdvertisableApplications.requestAllFields.execute()) + + def adAffectedAdSets(adAccountId: String): Task[List[AdSet]] = + adAccount(adAccountId, _.getAffectedAdSets.requestAllFields.execute()) + + def adAgencies(adAccountId: String): Task[List[Business]] = + adAccount(adAccountId, _.getAgencies.requestAllFields.execute()) + + def adApplications(adAccountId: String): Task[List[Application]] = + adAccount(adAccountId, _.getApplications.requestAllFields.execute()) + + def adAssignedUsers(adAccountId: String): Task[List[AssignedUser]] = + adAccount(adAccountId, _.getAssignedUsers.requestAllFields.execute()) + + def adAsyncRequests(adAccountId: String): Task[List[AsyncRequest]] = + adAccount(adAccountId, _.getAsyncRequests.requestAllFields.execute()) + + def adCampaigns(adAccountId: String): Task[List[Campaign]] = + adAccount(adAccountId, _.getCampaigns.requestAllFields.execute()) + + def adCampaignsByLabels(adAccountId: String): Task[List[Campaign]] = + adAccount(adAccountId, _.getCampaignsByLabels.requestAllFields.execute()) + + def adCustomAudiences(adAccountId: String): Task[List[CustomAudience]] = + adAccount(adAccountId, _.getCustomAudiences.requestAllFields.execute()) + + def adCustomAudiencesTOS(adAccountId: String): Task[List[CustomAudiencesTOS]] = + adAccount(adAccountId, _.getCustomAudiencesTos.requestAllFields.execute()) + + def adCustomConversions(adAccountId: String): Task[List[CustomConversion]] = + adAccount(adAccountId, _.getCustomConversions.requestAllFields.execute()) + + def adDeliveryEstimate(adAccountId: String): Task[List[AdAccountDeliveryEstimate]] = + adAccount(adAccountId, _.getDeliveryEstimate.requestAllFields.execute()) + + def adDeprecatedTargetingAdSets(adAccountId: String): Task[List[AdSet]] = + adAccount(adAccountId, _.getDeprecatedTargetingAdSets.requestAllFields.execute()) + + def adImpactingAdStudies(adAccountId: String): Task[List[AdStudy]] = + adAccount(adAccountId, _.getImpactingAdStudies.requestAllFields.execute()) + + def adInsights(adAccountId: String): Task[List[AdsInsights]] = + adAccount(adAccountId, _.getInsights.requestAllFields.execute()) + + def adInstagramAccounts(adAccountId: String): Task[List[InstagramUser]] = + adAccount(adAccountId, _.getInstagramAccounts.requestAllFields.execute()) + + def adMatchedSearchApplications(adAccountId: String): Task[List[AdAccountMatchedSearchApplicationsEdgeData]] = + adAccount(adAccountId, _.getMatchedSearchApplications.requestAllFields.execute()) + + def adMaxBid(adAccountId: String): Task[List[AdAccountMaxBid]] = + adAccount(adAccountId, _.getMaxBid.requestAllFields.execute()) + + def adMinimumBudgets(adAccountId: String): Task[List[MinimumBudget]] = + adAccount(adAccountId, _.getMinimumBudgets.requestAllFields.execute()) + + def adOfflineConversionDataSets(adAccountId: String): Task[List[OfflineConversionDataSet]] = + adAccount(adAccountId, _.getOfflineConversionDataSets.requestAllFields.execute()) + + def adOnBehalfRequests(adAccountId: String): Task[List[BusinessOwnedObjectOnBehalfOfRequest]] = + adAccount(adAccountId, _.getOnBehalfRequests.requestAllFields.execute()) + + def adPromotePages(adAccountId: String): Task[List[Page]] = + adAccount(adAccountId, _.getPromotePages.requestAllFields.execute()) + + def adPublisherBlockLists(adAccountId: String): Task[List[PublisherBlockList]] = + adAccount(adAccountId, _.getPublisherBlockLists.requestAllFields.execute()) + + def adReachEstimate(adAccountId: String): Task[List[AdAccountReachEstimate]] = 
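+    // Each edge accessor below delegates to the private adAccount/user helpers at the bottom of
+    // this class: the lambda builds the SDK request, attemptBlocking runs it on the blocking
+    // executor, and the implicit toList converts the returned APINodeList into a Scala List.
+    // A new edge would follow the same shape (hypothetical example, SDK method name assumed):
+    //
+    //   def adConnectedInstagramAccounts(adAccountId: String): Task[List[InstagramUser]] =
+    //     adAccount(adAccountId, _.getConnectedInstagramAccounts.requestAllFields.execute())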
+ adAccount(adAccountId, _.getReachEstimate.requestAllFields.execute()) + + def adReachFrequencyPredictions(adAccountId: String): Task[List[ReachFrequencyPrediction]] = + adAccount(adAccountId, _.getReachFrequencyPredictions.requestAllFields.execute()) + + def adSavedAudiences(adAccountId: String): Task[List[SavedAudience]] = + adAccount(adAccountId, _.getSavedAudiences.requestAllFields.execute()) + + def adSubscribedApps(adAccountId: String): Task[List[AdAccountSubscribedApps]] = + adAccount(adAccountId, _.getSubscribedApps.requestAllFields.execute()) + + def adTargetingBrowse(adAccountId: String): Task[List[AdAccountTargetingUnified]] = + adAccount(adAccountId, _.getTargetingBrowse.requestAllFields.execute()) + + def adTargetingSearch(adAccountId: String): Task[List[AdAccountTargetingUnified]] = + adAccount(adAccountId, _.getTargetingSearch.requestAllFields.execute()) + + def adTargetingSentenceLines(adAccountId: String): Task[List[TargetingSentenceLine]] = + adAccount(adAccountId, _.getTargetingSentenceLines.requestAllFields.execute()) + + def adTargetingSuggestions(adAccountId: String): Task[List[AdAccountTargetingUnified]] = + adAccount(adAccountId, _.getTargetingSuggestions.requestAllFields.execute()) + + def adTargetingValidation(adAccountId: String): Task[List[AdAccountTargetingUnified]] = + adAccount(adAccountId, _.getTargetingValidation.requestAllFields.execute()) + + def adTracking(adAccountId: String): Task[List[AdAccountTrackingData]] = + adAccount(adAccountId, _.getTracking.requestAllFields.execute()) + + def adUsers(adAccountId: String): Task[List[AdAccountUser]] = + adAccount(adAccountId, _.getUsers.requestAllFields.execute()) + + def userAccounts(userId: String): Task[List[Page]] = + user(userId, _.getAccounts.requestAllFields.execute()) + + def userAdAccounts(userId: String): Task[List[AdAccount]] = + user(userId, _.getAdAccounts.requestAllFields.execute()) + + def userAdStudies(userId: String): Task[List[AdStudy]] = + user(userId, _.getAdStudies.requestAllFields.execute()) + + def userAlbums(userId: String): Task[List[Album]] = + user(userId, _.getAlbums.requestAllFields.execute()) + + def userAppRequests(userId: String): Task[List[AppRequest]] = + user(userId, _.getAppRequests.requestAllFields.execute()) + + def userAssignedAdAccounts(userId: String): Task[List[AdAccount]] = + user(userId, _.getAssignedAdAccounts.requestAllFields.execute()) + + def userAssignedBusinessAssetGroups(userId: String): Task[List[BusinessAssetGroup]] = + user(userId, _.getAssignedBusinessAssetGroups.requestAllFields.execute()) + + def userAssignedPages(userId: String): Task[List[Page]] = + user(userId, _.getAssignedPages.requestAllFields.execute()) + + def userAssignedProductCatalogs(userId: String): Task[List[ProductCatalog]] = + user(userId, _.getAssignedProductCatalogs.requestAllFields.execute()) + + def userBusinesses(userId: String): Task[List[Business]] = + user(userId, _.getBusinesses.requestAllFields.execute()) + + def userBusinessUsers(userId: String): Task[List[BusinessUser]] = + user(userId, _.getBusinessUsers.requestAllFields.execute()) + + def userConversations(userId: String): Task[List[UnifiedThread]] = + user(userId, _.getConversations.requestAllFields.execute()) + + def userCustomLabels(userId: String): Task[List[PageUserMessageThreadLabel]] = + user(userId, _.getCustomLabels.requestAllFields.execute()) + + def userEvents(userId: String): Task[List[Event]] = + user(userId, _.getEvents.requestAllFields.execute()) + + def userFriends(userId: String): Task[List[User]] = + 
user(userId, _.getFriends.requestAllFields.execute()) + + def userAdVideos(userId: String): Task[List[AdVideo]] = + user(userId, _.getVideos.requestAllFields.execute()) + + def video(videoId: String): Task[AdVideo] = + for { + ac <- apiContext + video <- ZIO.attempt(new AdVideo(videoId, ac).get().requestAllFields.execute()) + } yield video + + def videoThumbnails(videoId: String): Task[List[VideoThumbnail]] = + for { + ac <- apiContext + thumbnails <- ZIO.attempt(new AdVideo(videoId, ac).getThumbnails.requestAllFields.execute()) + } yield thumbnails + + + private def adAccount[A <: APINode](adAccountId: String, fn: AdAccount => APINodeList[A]): Task[List[A]] = + for { + ac <- apiContext + list <- attemptBlocking(fn(new AdAccount(adAccountId, ac))) + } yield list + + private def user[A <: APINode](userId: String, fn: User => APINodeList[A]): Task[List[A]] = + for { + ac <- apiContext + list <- attemptBlocking(fn(new User(userId, ac))) + } yield list + + private implicit def toList[A <: APINode](nodeList: APINodeList[A]): List[A] = + nodeList.iterator().asScala.toList +} diff --git a/jvm/src/main/scala/com/harana/modules/file/File.scala b/jvm/src/main/scala/com/harana/modules/file/File.scala new file mode 100644 index 0000000..b5767c4 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/file/File.scala @@ -0,0 +1,41 @@ +package com.harana.modules.file + +import io.circe.{Decoder, Encoder} +import io.vertx.core.buffer.Buffer +import io.vertx.core.streams.ReadStream +import io.vertx.ext.reactivestreams.ReactiveWriteStream +import one.jasyncfio.AsyncFile +import zio.Task +import zio.macros.accessible + +import java.nio.ByteBuffer +import java.nio.file.Path + +@accessible +trait File { + + def readStream(path: Path, range: Option[(Long, Long)] = None): Task[ReadStream[Buffer]] + def read(file: Either[Path, AsyncFile], buffer: ByteBuffer, position: Option[Int] = None): Task[Int] + def readJson[A](file: Path)(implicit decoder: Decoder[A]): Task[A] + def readString(file: Path): Task[String] + + def writeAwsStream(path: Path, + stream: ReactiveWriteStream[Buffer], + length: Long, + onStart: Option[() => Any] = None, + onStop: Option[() => Any] = None): Task[Unit] + def writeStream(path: Path, + stream: ReactiveWriteStream[Buffer], + length: Long, + onStart: Option[() => Any] = None, + onStop: Option[() => Any] = None, + onData: Option[Buffer => (Buffer, Boolean)] = None): Task[Unit] + def write(file: Either[Path, AsyncFile], buffer: ByteBuffer, position: Option[Int] = None): Task[Int] + def writeJson[A](file: Path, obj: A)(implicit encoder: Encoder[A]): Task[Unit] + def writeString(file: Path, string: String): Task[Unit] + + def merge(sourcePaths: List[Path], targetPath: Path): Task[Unit] + + def close(file: Either[Path, AsyncFile]): Task[Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/file/LiveFile.scala b/jvm/src/main/scala/com/harana/modules/file/LiveFile.scala new file mode 100644 index 0000000..4fc4c59 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/file/LiveFile.scala @@ -0,0 +1,235 @@ +package com.harana.modules.file + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.file.LiveFile.{chunk_size, eventExecutor} +import com.harana.modules.vertx.models.streams.AsyncFileReadStream +import io.circe.syntax.EncoderOps +import io.circe.{Decoder, Encoder, jawn} +import io.vertx.core.buffer.Buffer +import 
io.vertx.core.streams.ReadStream +import io.vertx.ext.reactivestreams.ReactiveWriteStream +import one.jasyncfio.{AsyncFile, EventExecutor} +import org.apache.commons.lang3.SystemUtils +import org.reactivestreams.{Subscriber, Subscription} +import zio.{Task, ZIO, ZLayer} + +import java.io.{FileInputStream, FileOutputStream} +import java.nio.ByteBuffer +import java.nio.charset.StandardCharsets +import java.nio.file.{Files, Path, StandardOpenOption} +import java.util + +object LiveFile { + val chunk_size = 1024 + val eventExecutor = if (SystemUtils.IS_OS_LINUX) Some(EventExecutor.initDefault()) else None + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveFile(config, logger, micrometer) + } +} + +case class LiveFile(config: Config, logger: Logger, micrometer: Micrometer) extends File { + + def readStream(path: Path, range: Option[(Long, Long)] = None): Task[ReadStream[Buffer]] = + ZIO.attempt(new AsyncFileReadStream(path.toFile.getAbsolutePath, range)) + + + def read(file: Either[Path, AsyncFile], buffer: ByteBuffer, position: Option[Int] = None) = { + file match { + case Left(path) => + ZIO.attempt { + val bytes = Files.readAllBytes(path) + buffer.put(bytes) + bytes.size + } + + case Right(file) => + ZIO.fromFutureJava(if (position.nonEmpty) file.read(buffer, position.get) else file.read(buffer)).map(_.toInt) + } + } + + + def readJson[A](path: Path)(implicit decoder: Decoder[A]): Task[A] = + ZIO.fromEither(jawn.decode[A](Files.readString(path))) + + + def readString(path: Path): Task[String] = + ZIO.attempt(Files.readString(path)) + + + def writeAwsStream(path: Path, + stream: ReactiveWriteStream[Buffer], + length: Long, + onStart: Option[() => Any] = None, + onStop: Option[() => Any] = None): Task[Unit] = { + var emptyChunk = false + var headerEndPos = -1 + var chunkEndPos = -1 + val crlf = "\r\n".getBytes(StandardCharsets.UTF_8) + val delimiter = ";".getBytes(StandardCharsets.UTF_8) + val buffer = Buffer.buffer() + + def countUntil(data: Buffer, sequence: Array[Byte], start: Int): Int = { + for (i <- start to data.length()) { + if (i + sequence.length < data.length()) { + val bytes = data.getBytes(i, i + sequence.length) + if (util.Arrays.equals(bytes, sequence)) return i + } + } + -1 + } + + writeStream(path, stream, length, onStart, onStop, Some(data => { + buffer.appendBuffer(data) + + if (headerEndPos == -1) { + val delimiterPos = countUntil(buffer, delimiter, 0) + delimiter.length + emptyChunk = buffer.slice(0,2).toString(StandardCharsets.UTF_8).equals("0;") + headerEndPos = countUntil(buffer, crlf, delimiterPos) + crlf.length + } + + if (headerEndPos > 0 && chunkEndPos == -1) { + chunkEndPos = countUntil(buffer, crlf, headerEndPos) + } + + if (headerEndPos > 0 && chunkEndPos > 0) { + (buffer.slice(headerEndPos + 1, chunkEndPos), true) + } else { + (Buffer.buffer(), emptyChunk) + } + }) + ) + } + + + def writeStream(path: Path, + stream: ReactiveWriteStream[Buffer], + length: Long, + onStart: Option[() => Any] = None, + onStop: Option[() => Any] = None, + onData: Option[Buffer => (Buffer, Boolean)] = None): Task[Unit] = + if (eventExecutor.nonEmpty) + for { + file <- ZIO.fromCompletableFuture(AsyncFile.open(path, eventExecutor.get)) + _ <- ZIO.async((cb: Task[Unit] => Unit) => + stream.subscribe(new Subscriber[Buffer] { + var subscription: Subscription = _ + var remaining = length + + override def onSubscribe(sub: Subscription) = { + subscription = sub + if (onStart.nonEmpty) 
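+              // Backpressure note (applies to the request(...) calls in both writeStream branches):
+              // demand is capped at chunk_size per request and bounded by the bytes still expected,
+              // so the publisher is throttled while the file is written out incrementally.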
+              onStart.get.apply()
+              sub.request(if (remaining > chunk_size) chunk_size else remaining)
+            }
+
+            override def onNext(t: Buffer) = {
+              val onDataResult = onData.map(_.apply(t))
+              val data = if (onDataResult.nonEmpty) onDataResult.get._1 else t
+              file.write(data.getByteBuf.nioBuffer())
+
+              remaining -= data.length()
+              if (remaining == 0 || (onDataResult.nonEmpty && onDataResult.get._2)) {
+                subscription.cancel()
+                onComplete()
+              } else {
+                subscription.request(if (remaining > chunk_size) chunk_size else remaining)
+              }
+            }
+
+            // Surface stream failures through the ZIO callback rather than throwing inside the subscriber.
+            override def onError(t: Throwable) = cb(ZIO.fail(t))
+
+            override def onComplete() = {
+              file.close()
+              if (onStop.nonEmpty) onStop.get.apply()
+              cb(ZIO.unit)
+            }
+          })
+        )
+      } yield ()
+    else
+      ZIO.async { (cb: Task[Unit] => Unit) =>
+        stream.subscribe(new Subscriber[Buffer] {
+          var subscription: Subscription = _
+          var remaining = length
+          var fos: FileOutputStream = _
+
+          override def onSubscribe(sub: Subscription) = {
+            subscription = sub
+            fos = new FileOutputStream(path.toFile)
+            if (onStart.nonEmpty) onStart.get.apply()
+            subscription.request(if (remaining > chunk_size) chunk_size else remaining)
+          }
+
+          override def onNext(t: Buffer) = {
+            val onDataResult = onData.map(_.apply(t))
+            val data = if (onDataResult.nonEmpty) onDataResult.get._1 else t
+            fos.write(data.getBytes)
+            remaining -= data.length()
+
+            if (remaining <= 0 || (onDataResult.nonEmpty && onDataResult.get._2)) {
+              subscription.cancel()
+              onComplete()
+            } else
+              subscription.request(if (remaining > chunk_size) chunk_size else remaining)
+          }
+
+          override def onError(t: Throwable) = cb(ZIO.fail(t))
+
+          override def onComplete() = {
+            fos.close()
+            if (onStop.nonEmpty) onStop.get.apply()
+            cb(ZIO.unit)
+          }
+        })
+      }
+
+
+  def write(file: Either[Path, AsyncFile], buffer: ByteBuffer, position: Option[Int] = None) =
+    file match {
+      case Left(path) =>
+        ZIO.attempt {
+          val array = buffer.array()
+          Files.write(path, array)
+          array.size
+        }
+
+      case Right(file) =>
+        ZIO.fromFutureJava(if (position.nonEmpty) file.write(buffer, position.get) else file.write(buffer)).map(_.toInt)
+    }
+
+
+  def writeJson[A](path: Path, obj: A)(implicit encoder: Encoder[A]): Task[Unit] = {
+    ZIO.attempt(Files.writeString(path, obj.asJson.noSpaces, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE, StandardOpenOption.SYNC))
+  }
+
+
+  def writeString(path: Path, string: String): Task[Unit] = {
+    ZIO.attempt(Files.writeString(path, string, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE, StandardOpenOption.SYNC))
+  }
+
+
+  def merge(sourcePaths: List[Path], targetPath: Path): Task[Unit] =
+    ZIO.attempt {
+      val target = new FileOutputStream(targetPath.toFile, true).getChannel
+      sourcePaths.foreach { path =>
+        val fis = new FileInputStream(path.toFile).getChannel
+        // Append each source file, in order, at the current end of the target channel.
+        target.transferFrom(fis, target.size(), Files.size(path))
+        fis.close()
+      }
+      target.close()
+    }
+
+
+  def close(file: Either[Path, AsyncFile]) =
+    file match {
+      case Left(path) =>
+        ZIO.unit
+      case Right(file) =>
+        // Discard the result so both branches conform to Task[Unit].
+        ZIO.fromFutureJava(file.close()).unit
+    }
+}
\ No newline at end of file
diff --git a/jvm/src/main/scala/com/harana/modules/git/Git.scala b/jvm/src/main/scala/com/harana/modules/git/Git.scala
new file mode 100644
index 0000000..7cf5406
--- /dev/null
+++ b/jvm/src/main/scala/com/harana/modules/git/Git.scala
@@ -0,0 +1,36 @@
+package com.harana.modules.git
+
+import org.eclipse.jgit.api.{Git => JGit}
+import org.eclipse.jgit.lib.Ref
+import zio.Task
+import zio.macros.accessible
+
+import java.io.File
+
+@accessible
+trait Git {
+
+  def clone(uri: 
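+  // Usage sketch (assumed): clone a repository and read the files of its most recent commit.
+  //
+  //   for {
+  //     repo  <- Git.clone("https://github.com/example/repo.git", new File("/tmp/repo"))
+  //     files <- Git.latestFiles(repo)
+  //   } yield files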
String, + localDirectory: File, + branch: Option[String] = None, + username: Option[String] = None, + password: Option[String] = None, + oauthToken: Option[String] = None): Task[JGit] + + def checkout(git: JGit, branchTagOrCommit: String): Task[Ref] + + def branch(git: JGit, + branch: String, + track: Boolean = true): Task[Ref] + + def refresh(git: JGit): Task[Unit] + + def hasChanged(git: JGit): Task[Boolean] + + def mostRecentCommitHash(git: JGit): Task[Option[String]] + + def filesForCommit(git: JGit, hash: String): Task[List[File]] + + def latestFiles(git: JGit): Task[List[File]] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/git/LiveGit.scala b/jvm/src/main/scala/com/harana/modules/git/LiveGit.scala new file mode 100644 index 0000000..4248a04 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/git/LiveGit.scala @@ -0,0 +1,105 @@ +package com.harana.modules.git + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import org.eclipse.jgit.api.{CreateBranchCommand, Git => JGit} +import org.eclipse.jgit.lib.{ObjectId, Ref} +import org.eclipse.jgit.transport.UsernamePasswordCredentialsProvider +import org.eclipse.jgit.treewalk.TreeWalk +import zio.{Task, ZIO, ZLayer} + +import java.io.File +import scala.collection.mutable + +object LiveGit { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveGit(config, logger, micrometer) + } +} + +case class LiveGit(config: Config, logger: Logger, micrometer: Micrometer) extends Git { + + def clone(uri: String, + localDirectory: File, + branch: Option[String] = None, + username: Option[String] = None, + password: Option[String] = None, + oauthToken: Option[String] = None): Task[JGit] = + for { + git <- ZIO.succeed { + val cloneCommand = JGit.cloneRepository().setDirectory(localDirectory).setURI(uri) + if (branch.nonEmpty) cloneCommand.setBranch(branch.get) + if (username.nonEmpty && password.nonEmpty) cloneCommand.setCredentialsProvider(new UsernamePasswordCredentialsProvider(username.get, password.get)) + if (oauthToken.nonEmpty) cloneCommand.setCredentialsProvider(new UsernamePasswordCredentialsProvider(oauthToken.get, "")) + cloneCommand.call() + } + } yield git + + + def checkout(git: JGit, branchTagOrCommit: String): Task[Ref] = + for { + ref <- ZIO.attempt(git.checkout().setName(branchTagOrCommit).call()) + } yield ref + + + def branch(git: JGit, branch: String, track: Boolean = true): Task[Ref] = + ZIO.attempt { + git + .checkout + .setCreateBranch(true) + .setName(branch) + .setUpstreamMode(CreateBranchCommand.SetupUpstreamMode.TRACK) + .setStartPoint("origin/" + branch) + .call() + } + + + def refresh(git: JGit): Task[Unit] = + for { + _ <- ZIO.attempt(git.pull().call()) + } yield () + + + def hasChanged(git: JGit): Task[Boolean] = + for { + prevCommit <- mostRecentCommitHash(git) + _ <- refresh(git) + newCommit <- mostRecentCommitHash(git) + changed = prevCommit.isEmpty && newCommit.nonEmpty || prevCommit.nonEmpty && newCommit.nonEmpty && prevCommit != newCommit + } yield changed + + + def mostRecentCommitHash(git: JGit): Task[Option[String]] = + for { + it <- ZIO.attempt(git.log.setMaxCount(1).call.iterator()).option + hash = if (it.nonEmpty && it.get.hasNext) Some(it.get.next().getName) else None + } yield hash + + + def filesForCommit(git: JGit, hash: String): Task[List[File]] = { + ZIO.attempt { + val 
treeWalk = new TreeWalk(git.getRepository) + treeWalk.reset(ObjectId.fromString(hash)) + + val paths = mutable.ListBuffer[File]() + + while (treeWalk.next) paths += new File(treeWalk.getPathString) + if (treeWalk != null) treeWalk.close() + + paths.toList + } + } + + + def latestFiles(git: JGit): Task[List[File]] = + for { + hash <- mostRecentCommitHash(git) + files <- ZIO.ifZIO(ZIO.succeed(hash.nonEmpty))(filesForCommit(git, hash.get), ZIO.attempt(List())) + _ <- logger.debug(s"Latest files end") + } yield files +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/google/Google.scala b/jvm/src/main/scala/com/harana/modules/google/Google.scala new file mode 100644 index 0000000..45c2526 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/google/Google.scala @@ -0,0 +1,21 @@ +package com.harana.modules.google + +import zio.Task +import zio.macros.accessible + +@accessible +trait Google { + + def pageView(clientId: String, page: String, title: String): Task[String] + + def event(clientId: String, category: String, action: String, label: String, value: String): Task[Event] + + def exception(clientId: String, description: String, fatal: Boolean): Task[String] + + def time(clientId: String, category: String, variable: String, time: Long, label: String): Task[Event] + + def send(event: Event): Task[Unit] + + def batch(events: List[Event]): Task[Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/google/LiveGoogle.scala b/jvm/src/main/scala/com/harana/modules/google/LiveGoogle.scala new file mode 100644 index 0000000..a996feb --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/google/LiveGoogle.scala @@ -0,0 +1,104 @@ +package com.harana.modules.google + +import com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import zio.{Task, ZIO, ZLayer} + +object LiveGoogle { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + http <- ZIO.service[Http] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveGoogle(config, http, logger, micrometer) + } +} + +case class LiveGoogle(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends Google { + + def pageView(clientId: String, page: String, title: String): Task[Event] = + for { + propertyId <- config.string("google.tags.propertyId") + domain <- config.string("http.domain", "domain") + event <- ZIO.attempt( + Map( + "v" -> 1, + "tid" -> propertyId, + "cid" -> clientId, + "t" -> "pageview", + "dh" -> domain, + "dp" -> (if (page.startsWith("/")) page else s"/$page"), + "dt" -> title, + ).mkString("&") + ) + } yield event + + + def event(clientId: String, category: String, action: String, label: String, value: String): Task[Event] = + for { + propertyId <- config.string("google.tags.propertyId") + event <- ZIO.attempt( + Map( + "v" -> 1, + "tid" -> propertyId, + "cid" -> clientId, + "t" -> "event", + "ec" -> category, + "ea" -> action, + "el" -> label, + "ev" -> value + ).mkString("&") + ) + } yield event + + + def exception(clientId: String, description: String, fatal: Boolean): Task[Event] = + for { + propertyId <- config.string("google.tags.propertyId") + event <- ZIO.attempt( + Map( + "v" -> 1, + "tid" -> propertyId, + "cid" -> clientId, + "t" -> "event", + "exd" -> description, + "exf" -> (if (fatal) 1 else 0) + ).mkString("&") + ) + } yield event + + + def time(clientId: String, category: 
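+  // Sketch of the assumed wire format (Google Analytics Measurement Protocol), with each map
+  // entry rendered as key=value:  v=1&tid=<propertyId>&cid=<clientId>&t=timing&utc=<category>&...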
String, variable: String, time: Long, label: String): Task[Event] = + for { + propertyId <- config.string("google.tags.propertyId") + event <- ZIO.attempt( + Map( + "v" -> 1, + "tid" -> propertyId, + "cid" -> clientId, + "t" -> "timing", + "utc" -> category, + "utv" -> variable , + "utt" -> time, + "utl" -> label + ).mkString("&") + ) + } yield event + + + def send(event: Event): Task[Unit] = + for { + url <- config.string("google.tags.url") + _ <- http.post(s"$url/collect", Some(event)).mapError(e => new Exception(e.toString)) + } yield () + + + def batch(events: List[Event]): Task[Unit] = + for { + url <- config.string("google.tags.url") + _ <- http.post(s"$url/batch", Some(events.mkString("\n"))).mapError(e => new Exception(e.toString)) + } yield () +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/google/package.scala b/jvm/src/main/scala/com/harana/modules/google/package.scala new file mode 100644 index 0000000..6aabd81 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/google/package.scala @@ -0,0 +1,7 @@ +package com.harana.modules + +package object google { + + type Event = String + +} diff --git a/jvm/src/main/scala/com/harana/modules/handlebars/Handlebars.scala b/jvm/src/main/scala/com/harana/modules/handlebars/Handlebars.scala new file mode 100644 index 0000000..88c2162 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/handlebars/Handlebars.scala @@ -0,0 +1,13 @@ +package com.harana.modules.handlebars + +import zio.Task +import zio.macros.accessible + +@accessible +trait Handlebars { + + def renderPath(path: String, props: Map[String, Object]): Task[String] + + def renderString(name: String, props: Map[String, Object]): Task[String] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/handlebars/LiveHandlebars.scala b/jvm/src/main/scala/com/harana/modules/handlebars/LiveHandlebars.scala new file mode 100644 index 0000000..460b28d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/handlebars/LiveHandlebars.scala @@ -0,0 +1,46 @@ +package com.harana.modules.handlebars + +import com.github.jknack.handlebars.{Context, Handlebars => CoreHandlebars} +import com.github.jknack.handlebars.context.{JavaBeanValueResolver, MapValueResolver, MethodValueResolver} +import com.github.jknack.handlebars.io.ClassPathTemplateLoader +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import zio.{Task, ZIO, ZLayer} + +object LiveHandlebars { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveHandlebars(config, logger, micrometer) + } +} + +case class LiveHandlebars(config: Config, logger: Logger, micrometer: Micrometer) extends Handlebars { + + private val handlebars = { + val l = new ClassPathTemplateLoader + l.setPrefix("/templates/") + l.setSuffix(".hbs") + val hb = new CoreHandlebars(l) + hb.registerHelper("each", ScalaEachHelper) + hb.infiniteLoops(true) + } + + def renderPath(name: String, props: Map[String, Object]): Task[String] = + ZIO.attempt(handlebars.compile(name)(context(props))) + + + def renderString(content: String, props: Map[String, Object]): Task[String] = + ZIO.attempt(handlebars.compileInline(content)(context(props))) + + + private def context(props: Map[String, Object]) = + Context + .newBuilder(props) + .resolver(ScalaResolver, MapValueResolver.INSTANCE, MethodValueResolver.INSTANCE, 
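+        // Resolver order is assumed to matter here: ScalaResolver is listed first so Scala Maps,
+        // Options and case-class accessors are tried before the Java-oriented resolvers.
+        // Usage sketch: renderString("Hello {{name}}", Map("name" -> "World")) should yield "Hello World".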
JavaBeanValueResolver.INSTANCE + ).build() + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/handlebars/ScalaResolver.scala b/jvm/src/main/scala/com/harana/modules/handlebars/ScalaResolver.scala new file mode 100644 index 0000000..a02e3d6 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/handlebars/ScalaResolver.scala @@ -0,0 +1,59 @@ +package com.harana.modules.handlebars + +import com.github.jknack.handlebars.context.MapValueResolver +import com.github.jknack.handlebars.helper.EachHelper +import com.github.jknack.handlebars.{Helper, Options, ValueResolver} + +import scala.jdk.CollectionConverters._ +import scala.reflect.runtime.{universe => ru} +import scala.util.Try + +object ScalaResolver extends ValueResolver { + + private val rootMirror = ru.runtimeMirror(getClass.getClassLoader) + + private def methodMirrorFor(context: AnyRef, name: String): Option[ru.MethodMirror] = { + val meta = rootMirror.reflect(context) + val optAccessor = meta.symbol.info.decls find { m => + m.isMethod && m.isPublic && m.name.toString == name + } + optAccessor.map(a => meta.reflectMethod(a.asMethod)) + } + + override def resolve(context: AnyRef, name: String): AnyRef = context match { + case m: collection.Map[_,_] => MapValueResolver.INSTANCE.resolve(m.asJava, name) + case _ => + val optMM = methodMirrorFor(context, name) + val ret = optMM.fold(ValueResolver.UNRESOLVED)(m => resolve(m.apply())): AnyRef + println(s"...returning ${ret.toString}") + ret + } + + override def resolve(context: scala.Any): AnyRef = context match { + case m: collection.Map[_,_] => MapValueResolver.INSTANCE.resolve(m.asJava) + case Some(x: AnyRef) => x + case None => null + case x: AnyRef => x + } + + override def propertySet(context: scala.Any): java.util.Set[java.util.Map.Entry[String, AnyRef]] = context match { + case m: collection.Map[_,_] => + MapValueResolver.INSTANCE.propertySet(m.asJava) + case _ => + println(s"ScalaMemberResolver.propertySet in context: [${context.getClass.getName}]") + val meta = rootMirror.reflect(context) + val accessors = meta.symbol.info.decls.filter(m => m.isMethod && m.isPublic).toSeq + val results = for { + a <- accessors + v <- Try(meta.reflectMethod(a.asMethod).apply()).toOption + } yield a.name.toString -> v.asInstanceOf[AnyRef] + results.toMap.asJava.entrySet + } +} + +object ScalaEachHelper extends Helper[AnyRef] { + override def apply(context: scala.AnyRef, options: Options): AnyRef = context match { + case iter: Iterable[_] => EachHelper.INSTANCE.apply(iter.asJava, options) + case _ => EachHelper.INSTANCE.apply(context, options) + } +} diff --git a/jvm/src/main/scala/com/harana/modules/ignite/Ignite.scala b/jvm/src/main/scala/com/harana/modules/ignite/Ignite.scala new file mode 100644 index 0000000..5a8ae69 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ignite/Ignite.scala @@ -0,0 +1,94 @@ +package com.harana.modules.ignite + +import zio.Task +import zio.macros.accessible + +@accessible +trait Ignite { + + def attach(vmId: String, + quiet: Option[Boolean] = None): Task[List[String]] + + def completion(quiet: Option[Boolean] = None): Task[List[String]] + + def copyToVM(source: String, + destination: String, + quiet: Option[Boolean] = None, + timeout: Option[Int] = None): Task[List[String]] + + def createVM(ociImage: String, + copyFiles: List[String] = List(), + cpus: Option[Int] = None, + disk: Option[Int] = None, + labels: Map[String, String] = Map(), + memory: Option[Int] = None, + name: Option[String] = None, + ports: Map[String, String] = 
Map(), + quiet: Option[Boolean] = None, + ssh: Option[String] = None, + volumes: List[String] = List()): Task[List[String]] + + def exec(vmId: String, + command: String, + quiet: Option[Boolean] = None, + timeout: Option[Int] = None, + tty: Option[Boolean] = None): Task[List[String]] + + def importImage(ociImage: String, + quiet: Option[Boolean] = None): Task[List[String]] + + def inspectVM(vmId: String, + outputFormat: Option[String] = None, + quiet: Option[Boolean] = None, + template: Option[String] = None): Task[List[String]] + + def killVMs(vmIds: List[String], + quiet: Option[Boolean] = None): Task[List[String]] + + def listImages(quiet: Option[Boolean] = None): Task[List[String]] + + def listVMs(all: Option[Boolean] = None, + filter: Option[String] = None, + quiet: Option[Boolean] = None, + template: Option[String] = None): Task[List[String]] + + def logs(vmId: String, + quiet: Option[Boolean] = None): Task[List[String]] + + def removeImages(imageIds: List[String], + force: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] + + def removeVMs(vmIds: List[String], + force: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] + + def runVM(ociImage: String, + copyFiles: List[String] = List(), + cpus: Option[Int] = None, + disk: Option[Int] = None, + interactive: Option[Boolean] = None, + labels: Map[String, String] = Map(), + memory: Option[Int] = None, + name: Option[String] = None, + ports: Map[String, String] = Map(), + quiet: Option[Boolean] = None, + ssh: Option[String] = None, + volumes: List[String] = List()): Task[List[String]] + + def ssh(vmId: String, + quiet: Option[Boolean] = None, + timeout: Option[Int] = None, + tty: Option[Boolean] = None): Task[List[String]] + + def startVM(vmId: String, + interactive: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] + + def stopVM(vmId: List[String], + force: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] + + def version(outputFormat: Option[String] = None, + quiet: Option[Boolean] = None): Task[List[String]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ignite/LiveIgnite.scala b/jvm/src/main/scala/com/harana/modules/ignite/LiveIgnite.scala new file mode 100644 index 0000000..eee23da --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ignite/LiveIgnite.scala @@ -0,0 +1,235 @@ +package com.harana.modules.ignite + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import zio.process.Command +import zio.{Task, UIO, ZIO, ZLayer} + +object LiveIgnite { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveIgnite(config, logger, micrometer) + } +} + +case class LiveIgnite(config: Config, logger: Logger, micrometer: Micrometer) extends Ignite { + + def attach(vmId: String, quiet: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("action", vmId) ++ quiet: _*).lines + } yield cmd.toList + + + def completion(quiet: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("completion") ++ quiet: _*).lines + } yield cmd.toList + + + def copyToVM(source: String, + destination: String, + quiet: Option[Boolean] = None, + timeout: Option[Int] = None): Task[List[String]] = + for { 
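+      // The booleanArg/intArg/stringArg helpers at the bottom of this class turn each supplied
+      // Option into zero or more CLI tokens, which are appended to the subcommand before the
+      // process is spawned. Hypothetical call: Ignite.copyToVM("my-vm:/etc/hosts", "/tmp/hosts", timeout = Some(30))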
+ quiet <- booleanArg("quiet", quiet) + timeout <- intArg("timeout", timeout) + cmd <- Command("ignite", List("cp", source, destination) ++ quiet ++ timeout: _*).lines + } yield cmd.toList + + + def createVM(ociImage: String, + copyFiles: List[String] = List(), + cpus: Option[Int] = None, + disk: Option[Int] = None, + labels: Map[String, String] = Map(), + memory: Option[Int] = None, + name: Option[String] = None, + ports: Map[String, String] = Map(), + quiet: Option[Boolean] = None, + ssh: Option[String] = None, + volumes: List[String] = List()): Task[List[String]] = + for { + cpusArg <- intArg("cpus", cpus, includeValue = true) + diskArg <- intArg("size", disk, includeValue = true) + labelsArg = if (labels.isEmpty) List() else List("l", labels.map { case (k,v) => s"$k=$v" }.mkString) + memoryArg <- intArg("memory", memory, includeValue = true) + nameArg <- stringArg("name", name, includeValue = true) + quietArg <- booleanArg("quiet", quiet) + sshArg = ssh.map(s => List(s"ssh=$s")).getOrElse(List()) + args = cpusArg ++ diskArg ++ labelsArg ++ memoryArg ++ nameArg ++ quietArg ++ sshArg + cmd <- Command("ignite", List("create", ociImage) ++ args.toSeq: _*).lines + } yield cmd.toList + + + def exec(vmId: String, + command: String, + quiet: Option[Boolean] = None, + timeout: Option[Int] = None, + tty: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + timeout <- intArg("timeout", timeout) + tty <- booleanArg("tty", tty) + cmd <- Command("ignite", List("exec", command) ++ quiet ++ timeout ++ tty: _*).lines + } yield cmd.toList + + + def importImage(ociImage: String, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("image", "import", ociImage) ++ quiet: _*).lines + } yield cmd.toList + + + def inspectVM(vmId: String, + outputFormat: Option[String] = None, + quiet: Option[Boolean] = None, + template: Option[String] = None): Task[List[String]] = + for { + outputFormat <- stringArg("output", outputFormat) + quiet <- booleanArg("quiet", quiet) + template <- stringArg("t", template, includeValue = true) + cmd <- Command("ignite", List("inspect", "vm", vmId) ++ outputFormat ++ quiet ++ template: _*).lines + } yield cmd.toList + + + def killVMs(vmIds: List[String], + quiet: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("kill") ++ vmIds ++ quiet: _*).lines + } yield cmd.toList + + + def listImages(quiet: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("image", "ls") ++ quiet: _*).lines + } yield cmd.toList + + + def listVMs(all: Option[Boolean] = None, + filter: Option[String] = None, + quiet: Option[Boolean] = None, + template: Option[String] = None): Task[List[String]] = + for { + all <- booleanArg("all", all) + filter <- stringArg("filter", filter, includeValue = true) + quiet <- booleanArg("quiet", quiet) + template <- stringArg("template", template) + cmd <- Command("ignite", List("ps") ++ all ++ filter ++ quiet ++ template: _*).lines + } yield cmd.toList + + + def logs(vmId: String, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("logs", vmId) ++ quiet: _*).lines + } yield cmd.toList + + + def removeImages(imageIds: List[String], + force: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + force <- 
booleanArg("force", force) + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("rmi") ++ imageIds ++ force ++ quiet: _*).lines + } yield cmd.toList + + + def removeVMs(vmIds: List[String], + force: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + force <- booleanArg("force", force) + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("rm") ++ vmIds ++ force ++ quiet: _*).lines + } yield cmd.toList + + + def runVM(ociImage: String, + copyFiles: List[String] = List(), + cpus: Option[Int] = None, + disk: Option[Int] = None, + interactive: Option[Boolean] = None, + labels: Map[String, String] = Map(), + memory: Option[Int] = None, + name: Option[String] = None, + ports: Map[String, String] = Map(), + quiet: Option[Boolean] = None, + ssh: Option[String] = None, + volumes: List[String] = List()): Task[List[String]] = + for { + cpusArg <- intArg("cpus", cpus, includeValue = true) + copyFilesArg = if (copyFiles.isEmpty) List() else List("copy-files", copyFiles.mkString) + diskArg <- intArg("size", disk, includeValue = true) + interactiveArg <- booleanArg("i", interactive) + labelsArg = if (labels.isEmpty) List() else List("l", labels.map { case (k,v) => s"$k=$v" }.mkString) + memoryArg <- intArg("memory", memory, includeValue = true) + nameArg <- stringArg("name", name, includeValue = true) + quietArg <- booleanArg("quiet", quiet) + sshesArg = ssh.map(s => List(s"ssh=$s")).getOrElse(List()) + args = copyFilesArg ++ cpusArg ++ diskArg ++ interactiveArg ++ labelsArg ++ memoryArg ++ nameArg ++ quietArg ++ sshesArg + cmd <- Command("ignite", List("run", ociImage) ++ args.toSeq: _*).lines + } yield cmd.toList + + + def ssh(vmId: String, + quiet: Option[Boolean] = None, + timeout: Option[Int] = None, + tty: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + timeout <- intArg("timeout", timeout, includeValue = true) + tty <- booleanArg("tty", tty) + cmd <- Command("ignite", List("ssh", vmId) ++ quiet ++ timeout ++ tty: _*).lines + } yield cmd.toList + + + def startVM(vmId: String, + interactive: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + interactive <- booleanArg("interactive", interactive) + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("start", vmId) ++ interactive ++ quiet: _*).lines + } yield cmd.toList + + + def stopVM(vmId: List[String], + force: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + force <- booleanArg("force-kill", force) + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("stop") ++ vmId ++ force ++ quiet: _*).lines + } yield cmd.toList + + + def version(outputFormat: Option[String] = None, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + outputFormat <- stringArg("output", outputFormat, includeValue = true) + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("version") ++ outputFormat ++ quiet: _*).lines + } yield cmd.toList + + + private def booleanArg(name: String, arg: Option[Boolean], includeValue: Boolean = false): UIO[List[String]] = + ZIO.succeed(arg.map(v => List(name) ++ (if (includeValue) List(v.toString) else List())).getOrElse(List())) + + private def intArg(name: String, arg: Option[Int], includeValue: Boolean = false): UIO[List[String]] = + ZIO.succeed(arg.map(v => List(name) ++ (if (includeValue) List(v.toString) else List())).getOrElse(List())) + + private def stringArg(name: String, arg: 
Option[String], includeValue: Boolean = false): UIO[List[String]] = + ZIO.succeed(arg.map(v => List(name) ++ (if (includeValue) List(v) else List())).getOrElse(List())) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/Jsoup.scala b/jvm/src/main/scala/com/harana/modules/jsoup/Jsoup.scala new file mode 100644 index 0000000..babb81c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/Jsoup.scala @@ -0,0 +1,49 @@ +package com.harana.modules.jsoup + +import com.harana.modules.jsoup.models._ +import org.jsoup.nodes.{Document, Element, Node} +import org.jsoup.select.Elements +import zio.macros.accessible +import zio.stream._ +import zio.{IO, UIO} + +import java.io.{BufferedInputStream, File} +import java.net.URL + +@accessible +trait Jsoup { + def parse(file: File): IO[JsoupError, Document] + + def parse(string: String, fragment: Boolean = false): IO[JsoupError, Document] + + def parse(string: String, baseUri: String): IO[JsoupError, Document] + + def parse(url: URL, connectionOptions: ConnectionOptions): IO[JsoupError, Document] + + def parse(urlStream: Stream[JsoupError, URL], connectionOptions: ConnectionOptions): UIO[Stream[JsoupError, Document]] + + def elementStream(doc: Document, selector: String): UIO[Stream[JsoupError, Element]] + + def linkStream(doc: Document): UIO[Stream[JsoupError, URL]] + + def mediaStream(doc: Document): UIO[Stream[JsoupError, URL]] + + def stream(url: URL, connectionOptions: ConnectionOptions): IO[JsoupError, BufferedInputStream] + + def download(url: URL, path: File, connectionOptions: ConnectionOptions): IO[JsoupError, Unit] + + def mirror(url: URL, downloadDir: File, connectionOptions: ConnectionOptions): IO[JsoupError, Unit] + + def recursiveDownload(startDoc: Document, + navigateSelector: String, + downloadSelector: String, + downloadDir: File, + shouldDownload: Document => Boolean, + connectionOptions: ConnectionOptions): IO[JsoupError, Unit] +} + +object Jsoup { + implicit def enrichElements(xs: Elements): RichElements = new RichElements(xs) + implicit def enrichElement(el: Element): RichElement = new RichElement(el) + implicit def enrichNodeList[N <: Node](l: java.util.List[N]): RichNodeList[N] = new RichNodeList(l) +} diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/AttributeOption.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/AttributeOption.scala new file mode 100644 index 0000000..7d304a8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/AttributeOption.scala @@ -0,0 +1,34 @@ +package com.harana.modules.jsoup.models + +import org.jsoup.nodes.Attributes + +import scala.jdk.CollectionConverters._ +import scala.util.matching.Regex + +trait AttributeOption extends ElementTarget { + def attribute(name: String): Option[String] = EmptyStringToOption(target.attr(name)) + def attributeRegex(nameRegex: Regex): Option[String] = AttributeRegexToOption(target.attributes, nameRegex) +} + +trait ElementsAttributeOption extends ElementsTarget { + def attribute(name: String): Option[String] = EmptyStringToOption(target.attr(name)) + + def attributeRegex(nameRegex: Regex): Option[String] = { + val elems = target.listIterator.asScala + elems.collectFirst(Function.unlift(elem => AttributeRegexToOption(elem.attributes, nameRegex))) + } +} + +object EmptyStringToOption { + def apply(ss: String): Option[String] = ss match { + case "" => None + case s: String => Some(s) + } +} + +object AttributeRegexToOption { + def apply(attributes: Attributes, nameRegex: Regex): 
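+  // Returns the value of the first attribute whose name matches the given regex, or None if no
+  // name matches or the matched value is empty; e.g. (sketch, with JsoupImplicits in scope)
+  // element.attributeRegex("data-.*".r) would pick up the first data-* attribute.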
Option[String] = { + val atts = attributes.asList.asScala + atts.find(att => nameRegex.findFirstIn(att.getKey).nonEmpty).flatMap(att => EmptyStringToOption(att.getValue)) + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/ClosestElement.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/ClosestElement.scala new file mode 100644 index 0000000..366bf4b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/ClosestElement.scala @@ -0,0 +1,66 @@ +package com.harana.modules.jsoup.models + +import org.jsoup.nodes.Element +import org.jsoup.select.Elements + +object ClosestFinder extends JsoupImplicits { + + def findClosestOption(selector: String, elem: Element): Option[Element] = { + enrichElements(elem.select(selector)).headOption.orElse { + elem.parents.headOption.flatMap { _ => + findClosestOption(selector, elem.parents) + } + } + } + + def findClosestOption(selector: String, elems: Elements): Option[Element] = { + elems.select(selector).headOption.orElse { + elems.parents.headOption.flatMap { _ => + findClosestOption(selector, elems.parents) + } + } + } + + /** Returns an Elements - i.e. it doesn't just grab the first */ + def findClosest(selector: String, elems: Elements): Elements = { + elems.headOption.fold(elems) { _ => + val here = elems.select(selector) + here.headOption.fold(findClosest(selector, elems.parents))(_ => here) + } + } + + def findClosestBeforeOption(selector: String, elem: Element): Option[Element] = + findClosest(selector, new Elements(elem)).find(_.isBefore(elem)) + + def findClosestAfterOption(selector: String, elem: Element): Option[Element] = + findClosest(selector, new Elements(elem)).find(_.isAfter(elem)) + +} + +trait ClosestElement extends ElementTarget { + + def closestOption(selector: String): Option[Element] = + ClosestFinder.findClosestOption(selector, target) + + def closest(selector: String): Elements = + ClosestFinder.findClosest(selector, new Elements(target)) + + def closestBeforeOption(selector: String): Option[Element] = + ClosestFinder.findClosestBeforeOption(selector, target) + + def closestAfterOption(selector: String): Option[Element] = + ClosestFinder.findClosestAfterOption(selector, target) + +} + +trait ClosestElements extends ElementsTarget { + + def closestOption(selector: String): Option[Element] = { + ClosestFinder.findClosestOption(selector, target) + } + + def closest(selector: String): Elements = { + ClosestFinder.findClosest(selector, target) + } + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/ConnectionOptions.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/ConnectionOptions.scala new file mode 100644 index 0000000..26054ec --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/ConnectionOptions.scala @@ -0,0 +1,20 @@ +package com.harana.modules.jsoup.models + +import org.jsoup.Connection + +import javax.net.ssl.SSLSocketFactory + +case class ConnectionOptions(cookies: Map[String, String] = Map(), + data: Map[String, String] = Map(), + followRedirects: Option[Boolean] = None, + headers: Map[String, String] = Map(), + ignoreContentType: Option[Boolean] = None, + ignoreHttpErrors: Option[Boolean] = None, + maxBodySize: Option[Int] = None, + method: Option[Connection.Method] = None, + postDataCharset: Option[String] = None, + proxy: Option[Proxy] = None, + referrer: Option[String] = None, + requestBody: Option[String] = None, + sslSocketFactory: Option[SSLSocketFactory] = None, + timeout: 
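+                             // Example (sketch): ConnectionOptions(headers = Map("User-Agent" -> "harana"),
+                             //                                     followRedirects = Some(true),
+                             //                                     timeout = Some(10000))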
Option[Int] = None) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/DocumentPositioning.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/DocumentPositioning.scala new file mode 100644 index 0000000..4ecf66d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/DocumentPositioning.scala @@ -0,0 +1,36 @@ +package com.harana.modules.jsoup.models + +import org.jsoup.nodes.Element + +import scala.annotation.tailrec + +trait DocumentPositioning extends ElementTarget { + + lazy val documentCoordinates = coordinatesOf(List(), Some(target)).reverse + + @tailrec + private def coordinatesOf(accum: List[Int], maybeElement: Option[Element]): List[Int] = { + if (maybeElement.isEmpty) { + accum + } else { + val elem = maybeElement.get + coordinatesOf(accum :+ elem.siblingIndex, Option(elem.parent)) + } + } + + private def compareCoordinates(maybeOther: Option[DocumentPositioning])(f: (Int, Int) => Boolean): Boolean = { + maybeOther.fold(false) { other => + val zip = documentCoordinates.zipAll(other.documentCoordinates, 0, 0) + val maybeFirstDiff = zip.dropWhile(dc => dc._1 == dc._2).headOption + maybeFirstDiff.fold(false) { diff => + f(diff._1, diff._2) + } + } + } + + def isBefore(other: DocumentPositioning): Boolean = + compareCoordinates(Option(other))((a, b) => a < b) + + def isAfter(other: DocumentPositioning): Boolean = + compareCoordinates(Option(other))((a, b) => a > b) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/ElementTarget.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/ElementTarget.scala new file mode 100644 index 0000000..20960ee --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/ElementTarget.scala @@ -0,0 +1,11 @@ +package com.harana.modules.jsoup.models + +import org.jsoup.nodes.Element +import org.jsoup.select.Elements + +trait Target[T] { + val target: T +} + +trait ElementTarget extends Target[Element] +trait ElementsTarget extends Target[Elements] \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/JsoupError.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/JsoupError.scala new file mode 100644 index 0000000..c4a030c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/JsoupError.scala @@ -0,0 +1,8 @@ +package com.harana.modules.jsoup.models + +sealed trait JsoupError extends Product with Serializable +object JsoupError { + case object NotFound extends JsoupError + case object AlreadyStopped extends JsoupError + case class Exception(t: Throwable) extends JsoupError +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/JsoupImplicits.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/JsoupImplicits.scala new file mode 100644 index 0000000..4b10a57 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/JsoupImplicits.scala @@ -0,0 +1,36 @@ +package com.harana.modules.jsoup.models + +import org.jsoup.nodes._ +import org.jsoup.select.Elements + +import scala.jdk.CollectionConverters._ + +trait JsoupImplicits { + implicit def enrichElements(xs: Elements): RichElements = new RichElements(xs) + implicit def enrichElement(el: Element): RichElement = new RichElement(el) + implicit def enrichNodeList[N <: Node](l: java.util.List[N]): RichNodeList[N] = new RichNodeList(l) +} + +object JsoupImplicits extends JsoupImplicits + +class RichElements(val target: Elements) + extends Iterable[Element] + with 
ClosestElements + with ElementsAttributeOption { + + def iterator: Iterator[Element] = { + target.asScala.iterator + } +} + +class RichElement(val target: Element) + extends ClosestElement + with DocumentPositioning + with AttributeOption { +} + +class RichNodeList[N <: Node](val target: java.util.List[N]) extends Iterable[Node] { + def iterator: Iterator[Node] = { + target.asScala.iterator + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kind/Kind.scala b/jvm/src/main/scala/com/harana/modules/kind/Kind.scala new file mode 100644 index 0000000..112a778 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kind/Kind.scala @@ -0,0 +1,39 @@ +package com.harana.modules.kind + +import com.harana.modules.kind.models.Cluster +import zio.Task +import zio.macros.accessible + +import java.io.File + +@accessible +trait Kind { + + def createCluster(name: String, + cluster: Option[Cluster] = None, + kubeConfig: Option[File] = None, + nodeImage: Option[String] = None, + retainNodesOnFailure: Boolean = false, + waitForControlPlane: Int = 0): Task[List[String]] + + def deleteCluster(name: String, + kubeConfig: Option[File] = None): Task[Unit] + + def listClusters: Task[List[String]] + + def listNodes(name: String): Task[List[String]] + + def buildBaseImage(image: String): Task[Unit] + + def buildNodeImage(image: String): Task[Unit] + + def loadImage(image: String): Task[Unit] + + def exportLogs(path: Option[File] = None): Task[Unit] + + def exportKubeConfig(name: String, + path: Option[File] = None): Task[Unit] + + def printKubeConfig(name: String, + internalAddress: Boolean = false): Task[Unit] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kind/LiveKind.scala b/jvm/src/main/scala/com/harana/modules/kind/LiveKind.scala new file mode 100644 index 0000000..44cf0ad --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kind/LiveKind.scala @@ -0,0 +1,94 @@ +package com.harana.modules.kind + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.kind.models.Cluster +import zio.process.Command +import zio.{Task, ZIO, ZLayer} + +import java.io.File +import scala.collection.mutable + +object LiveKind { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveKind(config, logger, micrometer) + } +} + +case class LiveKind(config: Config, logger: Logger, micrometer: Micrometer) extends Kind { + + def createCluster(name: String, + cluster: Option[Cluster] = None, + kubeConfig: Option[File] = None, + nodeImage: Option[String] = None, + retainNodesOnFailure: Boolean = false, + waitForControlPlane: Int = 0) = + for { + _ <- logger.info(s"Creating Kind cluster: $name") + args <- ZIO.succeed { + val args = mutable.ListBuffer[String]("create", "cluster", "--name", name) + if (cluster.nonEmpty) args += s"--config ${generateConfig(cluster.get)}" + if (kubeConfig.nonEmpty) args += s"--kubeconfig ${kubeConfig.get.getAbsolutePath}" + if (nodeImage.nonEmpty) args += s"--image ${nodeImage.get}" + if (retainNodesOnFailure) args += s"--retain ${retainNodesOnFailure.toString}" + if (waitForControlPlane > 0) args += s"--wait ${waitForControlPlane}s" + args + } + cmd <- Command("kind", args.toSeq: _*).lines + } yield cmd.toList + + + def deleteCluster(name: String, kubeConfig: Option[File] = None): Task[Unit] = + for { + _ <- logger.info(s"Deleting 
Kind cluster: $name") + args <- ZIO.succeed { + val args = mutable.ListBuffer[String]("delete", "cluster", "--name", name) + if (kubeConfig.nonEmpty) args += s"--kubeconfig ${kubeConfig.get.getAbsolutePath}" + args + } + _ <- Command("kind", args.toSeq: _*).lines + } yield () + + + def listClusters: Task[List[String]] = + for { + cmd <- Command("kind", "list", "nodes").lines + } yield cmd.toList + + + def listNodes(name: String): Task[List[String]] = + for { + cmd <- Command("kind", List("--name", name): _*).lines + } yield cmd.toList + + + def buildBaseImage(image: String): Task[Unit] = + null + + + def buildNodeImage(image: String): Task[Unit] = + null + + + def loadImage(image: String): Task[Unit] = + null + + + def exportLogs(path: Option[File] = None): Task[Unit] = + null + + + def exportKubeConfig(name: String, + path: Option[File] = None): Task[Unit] = + null + + + def printKubeConfig(name: String, + internalAddress: Boolean = false): Task[Unit] = + null +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kind/models/Cluster.scala b/jvm/src/main/scala/com/harana/modules/kind/models/Cluster.scala new file mode 100644 index 0000000..55864b3 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kind/models/Cluster.scala @@ -0,0 +1,22 @@ +package com.harana.modules.kind.models + +case class Cluster(nodes: List[Node], + apiServerListenAddress: Option[String] = None, + apiServerListenPort: Option[String] = None, + disableDefaultCNI: Option[Boolean] = None, + ipFamily: Option[String] = None, + podSubnet: Option[String] = None, + serviceSubnet: Option[String] = None) + +case class Node(role: Option[String] = None, + image: Option[String] = None, + extraMounts: List[Mount] = List(), + extraPortMappings: List[PortMapping] = List()) + +case class Mount(hostPath: String, + containerPath: String) + +case class PortMapping(containerPort: Int, + hostPort: Int, + listenAddress: Option[String] = None, + protocol: Option[String] = None) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kind/package.scala b/jvm/src/main/scala/com/harana/modules/kind/package.scala new file mode 100644 index 0000000..b842458 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kind/package.scala @@ -0,0 +1,11 @@ +package com.harana.modules + +import com.harana.modules.kind.models.Cluster + +import java.io.File + +package object kind { + + @inline + def generateConfig(cluster: Cluster): File = null +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kubernetes/Kubernetes.scala b/jvm/src/main/scala/com/harana/modules/kubernetes/Kubernetes.scala new file mode 100644 index 0000000..00af4b1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kubernetes/Kubernetes.scala @@ -0,0 +1,106 @@ +package com.harana.modules.kubernetes + +import akka.stream.scaladsl.Source +import akka.util.ByteString +import play.api.libs.json.{Format, Writes} +import skuber._ +import skuber.api.client._ +import skuber.api.patch.Patch +import skuber.apiextensions.CustomResourceDefinition +import zio.macros.accessible +import zio.stream.ZStream +import zio.{IO, Task} + +import scala.concurrent.Promise + +@accessible +trait Kubernetes { + def newClient: IO[K8SException, KubernetesClient] + + def get[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Option[O]] + + def exists[O <: ObjectResource](client: KubernetesClient, namespace: 
String, name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Boolean] + + def save(client: KubernetesClient, namespace: String, crd: CustomResourceDefinition): IO[K8SException, CustomResourceDefinition] + + def createNamespace(client: KubernetesClient, namespace: String)(implicit lc: LoggingContext): IO[K8SException, Namespace] + + def create[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, O] + + def createPodAndWait(client: KubernetesClient, namespace: String, obj: Pod, startupTime: Long)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Pod] + + def podInState(client: KubernetesClient, namespace: String, name: String, desiredState: String)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Boolean] + + def podTerminating(client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Boolean] + + def waitForPodToTerminate(client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Unit] + + def update[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, O] + + def delete[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, gracePeriodSeconds: Int = -1)(implicit rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Unit] + + def deleteWithOptions[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, options: DeleteOptions)(implicit rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Unit] + + def deleteAll[O <: ObjectResource](client: KubernetesClient, namespace: String)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] + + def deleteAllSelected[O <: ObjectResource](client: KubernetesClient, namespace: String, labelSelector: LabelSelector)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] + + def getNamespaceNames(client: KubernetesClient)(implicit lc: LoggingContext): IO[K8SException, List[String]] + + def listByNamespace[O <: ObjectResource](client: KubernetesClient)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, Map[String, ListResource[O]]] + + def list[O <: ObjectResource](client: KubernetesClient, namespace: String)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] + + def listSelected[O <: ObjectResource](client: KubernetesClient, namespace: String, labelSelector: LabelSelector)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] + + def listWithOptions[O <: ObjectResource](client: KubernetesClient, namespace: String, options: ListOptions)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] + + def updateStatus[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: 
O)(implicit fmt: Format[O], rd: ResourceDefinition[O], statusEv: HasStatusSubresource[O], lc: LoggingContext): IO[K8SException, O] + + def getStatus[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], statusEv: HasStatusSubresource[O], lc: LoggingContext): IO[K8SException, O] + + def watch[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def watch[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def watchAll[O <: ObjectResource](client: KubernetesClient, namespace: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def watchContinuously[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def watchContinuously[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def watchAllContinuously[O <: ObjectResource](client: KubernetesClient, namespace: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def watchWithOptions[O <: ObjectResource](client: KubernetesClient, namespace: String, options: ListOptions, bufsize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def getScale[O <: ObjectResource](client: KubernetesClient, namespace: String, objName: String)(implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext) : IO[K8SException, Scale] + + def updateScale[O <: ObjectResource](client: KubernetesClient, namespace: String, objName: String, scale: Scale)(implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext): IO[K8SException, Scale] + + def patch[P <: Patch, O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, patchData: P) + (implicit patchfmt: Writes[P], fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext = RequestLoggingContext()): IO[K8SException, O] + + def getPodLogSource(client: KubernetesClient, namespace: String, name: String, queryParams: Pod.LogQueryParams)(implicit lc: LoggingContext): IO[K8SException, Source[ByteString, _]] + + def exec(client: KubernetesClient, + namespace: String, + podName: String, + command: Seq[String], + containerName: Option[String] = None, + stdin: Option[ZStream[Any, Nothing, String]] = None, + stdout: Option[String => Task[Unit]] = None, + stderr: Option[String => Task[Unit]] = None, + tty: Boolean = false, + maybeClose: Option[Promise[Unit]] = None)(implicit lc: LoggingContext): IO[K8SException, Unit] + + def getServerAPIVersions(client: KubernetesClient)(implicit lc: LoggingContext): 
IO[K8SException, List[String]] + + def resourceFromFile[A <: ObjectResource](fileName: String)(implicit fmt: Format[A]): Task[A] + + def close(client: KubernetesClient): IO[K8SException, Unit] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kubernetes/LiveKubernetes.scala b/jvm/src/main/scala/com/harana/modules/kubernetes/LiveKubernetes.scala new file mode 100644 index 0000000..4b5b682 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kubernetes/LiveKubernetes.scala @@ -0,0 +1,251 @@ +package com.harana.modules.kubernetes + +import akka.actor.ActorSystem +import akka.stream.Materializer +import akka.stream.scaladsl.{Sink, Source} +import akka.util.ByteString +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory +import com.harana.modules.core.app.App.runEffect +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.kubernetes.LiveKubernetes._ +import play.api.libs.json.{Format, Json, Writes} +import skuber.api.Configuration +import skuber.api.client.{KubernetesClient, LoggingContext, RequestLoggingContext, WatchEvent} +import skuber.api.patch.Patch +import skuber.apiextensions.CustomResourceDefinition +import skuber.json.format.namespaceFormat +import skuber.{K8SException, k8sInit, _} +import zio.interop.reactivestreams.streamToPublisher +import zio.{Clock, _} +import zio.stream.ZStream + +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.Promise + +object LiveKubernetes { + val yamlReader = new ObjectMapper(new YAMLFactory) + val jsonWriter = new ObjectMapper() + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveKubernetes(config, logger, micrometer) + } +} + +case class LiveKubernetes(config: Config, logger: Logger, micrometer: Micrometer) extends Kubernetes { + + def newClient: IO[K8SException, KubernetesClient] = { + val cld = classOf[ActorSystem].getClassLoader + implicit val system = ActorSystem("Kubernetes", classLoader = Some(cld)) + ZIO.attempt(k8sInit(Configuration.parseKubeconfigFile().get)).refineToOrDie[K8SException] + } + + + def get[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Option[O]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).getOption[O](name)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def exists[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Boolean] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).getOption(name)(fmt, rd, lc).map(_.nonEmpty) }.refineToOrDie[K8SException] + + + def save(client: KubernetesClient, namespace: String, crd: CustomResourceDefinition): IO[K8SException, CustomResourceDefinition] = + ZIO.fromFuture { _ => + client.usingNamespace(namespace).create(crd).recoverWith { + case alreadyExists: K8SException if alreadyExists.status.code.contains(409) => + client.get[CustomResourceDefinition](crd.name).flatMap { existing => + val currentVersion = existing.metadata.resourceVersion + val newMeta = crd.metadata.copy(resourceVersion = currentVersion) + val updatedObj = crd.copy(metadata = newMeta) + client.update(updatedObj) + } + } + + 
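// --- Hedged usage sketch for the Kubernetes module above: spin up a throwaway nginx pod in a
// namespace and remove it again. The namespace, pod name and image tag are invented; a
// Kubernetes implementation (LiveKubernetes.layer) and a reachable cluster are assumed.
object KubernetesExample {
  import com.harana.modules.kubernetes.Kubernetes
  import skuber._
  import skuber.api.client.{LoggingContext, RequestLoggingContext}
  import skuber.json.format._
  import zio.ZIO

  implicit val lc: LoggingContext = RequestLoggingContext()

  val pod = Pod(
    metadata = ObjectMeta(name = "example-nginx"),
    spec     = Some(Pod.Spec(containers = List(Container(name = "nginx", image = "nginx:1.25"))))
  )

  val program =
    for {
      client <- Kubernetes.newClient
      _      <- Kubernetes.createNamespace(client, "sandbox")
      _      <- Kubernetes.createPodAndWait(client, "sandbox", pod, startupTime = 30000L)
      pods   <- Kubernetes.list[Pod](client, "sandbox")
      _      <- ZIO.debug(s"Pods in sandbox: ${pods.items.map(_.name)}")
      _      <- Kubernetes.delete[Pod](client, "sandbox", "example-nginx")
      _      <- Kubernetes.close(client)
    } yield ()
}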
}.refineToOrDie[K8SException] + + + def createNamespace(client: KubernetesClient, namespace: String)(implicit lc: LoggingContext): IO[K8SException, Namespace] = + ZIO.fromFuture { _ => client.create[Namespace](Namespace(namespace)) }.refineToOrDie[K8SException] + + + def create[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, O] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).create[O](obj)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def createPodAndWait(client: KubernetesClient, namespace: String, obj: Pod, startupTime: Long)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Pod] = + for { + pod <- create[Pod](client, namespace, obj) + schedule = Schedule.fixed(500.milliseconds) && Schedule.recurUntil[Boolean](running => running == true) + _ <- podInState(client, namespace, obj.name, "running").repeat(schedule) + } yield pod + + + def podTerminating(client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Boolean] = + get[Pod](client, namespace, name).map(pod => pod.flatMap(p => p.metadata.deletionTimestamp).nonEmpty) + + + def waitForPodToTerminate(client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Unit] = + for { + schedule <- ZIO.succeed(Schedule.fixed(500.milliseconds) && Schedule.recurWhile[Boolean](terminating => terminating == true)) + _ <- (for { + terminating <- podTerminating(client, namespace, name) + exists <- exists[Pod](client, namespace, name) + } yield terminating && exists).repeat(schedule) + } yield () + + + def podInState(client: KubernetesClient, namespace: String, name: String, desiredState: String)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Boolean] = + get[Pod](client, namespace, name).map(maybePod => + for { + pod <- maybePod + status <- pod.status + containerStatus <- status.containerStatuses.headOption + containerState <- containerStatus.state + ready = containerState.id == desiredState + } yield ready + ).map(_.getOrElse(false)) + + + def update[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, O] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).update[O](obj)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def delete[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, gracePeriodSeconds: Int = -1)(implicit rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Unit] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).delete[O](name, gracePeriodSeconds)(rd, lc) }.refineToOrDie[K8SException] + + + def deleteWithOptions[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, options: DeleteOptions)(implicit rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Unit] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).deleteWithOptions[O](name, options)(rd, lc) }.refineToOrDie[K8SException] + + + def deleteAll[O <: ObjectResource](client: KubernetesClient, namespace: String)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] = + ZIO.fromFuture { _ => 
client.usingNamespace(namespace).deleteAll[ListResource[O]]()(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def deleteAllSelected[O <: ObjectResource](client: KubernetesClient, namespace: String, labelSelector: LabelSelector)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).deleteAllSelected[ListResource[O]](labelSelector)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def getNamespaceNames(client: KubernetesClient)(implicit lc: LoggingContext): IO[K8SException, List[String]] = + ZIO.fromFuture { _ => client.getNamespaceNames(lc) }.refineToOrDie[K8SException] + + + def listByNamespace[O <: ObjectResource](client: KubernetesClient)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, Map[String, ListResource[O]]] = + ZIO.fromFuture { _ => client.listByNamespace[ListResource[O]]()(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def list[O <: ObjectResource](client: KubernetesClient, namespace: String)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).list[ListResource[O]]()(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def listSelected[O <: ObjectResource](client: KubernetesClient, namespace: String, labelSelector: LabelSelector)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).listSelected[ListResource[O]](labelSelector)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def listWithOptions[O <: ObjectResource](client: KubernetesClient, namespace: String, options: ListOptions)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).listWithOptions[ListResource[O]](options)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def updateStatus[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], statusEv: HasStatusSubresource[O], lc: LoggingContext): IO[K8SException, O] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).updateStatus[O](obj)(fmt, rd, statusEv, lc) }.refineToOrDie[K8SException] + + + def getStatus[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], statusEv: HasStatusSubresource[O], lc: LoggingContext): IO[K8SException, O] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).getStatus[O](name)(fmt, rd, statusEv, lc) }.refineToOrDie[K8SException] + + + def watch[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).watch[O](obj)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def watch[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.fromFuture { _ => 
client.usingNamespace(namespace).watch[O](name, sinceResourceVersion, bufSize)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def watchAll[O <: ObjectResource](client: KubernetesClient, namespace: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).watchAll[O](sinceResourceVersion, bufSize)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def watchContinuously[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.from(client.usingNamespace(namespace).watchContinuously[O](obj)(fmt, rd, lc)).refineToOrDie[K8SException] + + + def watchContinuously[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.from(client.usingNamespace(namespace).watchContinuously[O](name, sinceResourceVersion, bufSize)(fmt, rd, lc)).refineToOrDie[K8SException] + + + def watchAllContinuously[O <: ObjectResource](client: KubernetesClient, namespace: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.from(client.usingNamespace(namespace).watchAllContinuously[O](sinceResourceVersion, bufSize)(fmt, rd, lc)).refineToOrDie[K8SException] + + + def watchWithOptions[O <: ObjectResource](client: KubernetesClient, namespace: String, options: ListOptions, bufSize: Int = 10000)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.from(client.usingNamespace(namespace).watchWithOptions[O](options, bufSize)(fmt, rd, lc)).refineToOrDie[K8SException] + + + def getScale[O <: ObjectResource](client: KubernetesClient, namespace: String, objName: String)(implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext) : IO[K8SException, Scale] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).getScale[O](objName)(rd, sc, lc) }.refineToOrDie[K8SException] + + + def updateScale[O <: ObjectResource](client: KubernetesClient, namespace: String, objName: String, scale: Scale)(implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext): IO[K8SException, Scale] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).updateScale[O](objName, scale)(rd, sc, lc) }.refineToOrDie[K8SException] + + + def patch[P <: Patch, O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, patchData: P)(implicit patchfmt: Writes[P], fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext = RequestLoggingContext()): IO[K8SException, O] = + ZIO.fromFuture { _ => client.patch[P, O](name, patchData, Some(namespace))(patchfmt, fmt, rd, lc) }.refineToOrDie[K8SException] + + + def getPodLogSource(client: KubernetesClient, namespace: String, name: String, queryParams: Pod.LogQueryParams)(implicit lc: LoggingContext): IO[K8SException, Source[ByteString, _]] = + ZIO.fromFuture { _ => client.getPodLogSource(name, queryParams, Some(namespace))(lc) }.refineToOrDie[K8SException] + + + def exec(client: KubernetesClient, + 
namespace: String, + podName: String, + command: Seq[String], + containerName: Option[String] = None, + stdin: Option[ZStream[Any, Nothing, String]] = None, + stdout: Option[String => Task[Unit]] = None, + stderr: Option[String => Task[Unit]] = None, + tty: Boolean = false, + close: Option[Promise[Unit]] = None)(implicit lc: LoggingContext): IO[K8SException, Unit] = + for { + source <- if (stdin.nonEmpty) + for { + publisher <- stdin.get.toPublisher + source = Some(Source.fromPublisher(publisher)) + } yield source + else ZIO.none + + sinkOut = if (stdout.nonEmpty) Some(Sink.foreach[String](s => runEffect(stdout.get(s)))) else None + sinkErr = if (stderr.nonEmpty) Some(Sink.foreach[String](s => runEffect(stderr.get(s)))) else None + + _ <- ZIO.fromFuture { _ => + client.usingNamespace(namespace).exec(podName, command, containerName, source, sinkOut, sinkErr, tty, close)(lc) + }.refineToOrDie[K8SException] + } yield () + + + def getServerAPIVersions(client: KubernetesClient)(implicit lc: LoggingContext): IO[K8SException, List[String]] = + ZIO.fromFuture { _ => client.getServerAPIVersions(lc) }.refineToOrDie[K8SException] + + + def resourceFromFile[A <: ObjectResource](fileName: String)(implicit fmt: Format[A]): Task[A] = + for { + yaml <- ZIO.attempt(scala.io.Source.fromResource(fileName).mkString) + obj <- ZIO.attempt(yamlReader.readValue(yaml, classOf[Object])) + .onError(ex => logger.info(s"Failed to parse YAML for: $fileName with message: ${ex.prettyPrint}")) + json <- ZIO.attempt(jsonWriter.writeValueAsString(obj)) + resource <- ZIO.attempt(Json.parse(json).as[A]) + .onError(ex => logger.info(s"Failed to convert YAML to object for: $fileName with message: ${ex.prettyPrint}")) + } yield resource + + + def close(client: KubernetesClient): IO[K8SException, Unit] = + ZIO.from(client.close).refineToOrDie[K8SException] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kubernetes_s3/KubernetesS3.scala b/jvm/src/main/scala/com/harana/modules/kubernetes_s3/KubernetesS3.scala new file mode 100644 index 0000000..fbf4619 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kubernetes_s3/KubernetesS3.scala @@ -0,0 +1,18 @@ +package com.harana.modules.kubernetes_s3 + +import zio.Task +import zio.macros.accessible + +@accessible +trait KubernetesS3 { + + def createPersistentVolumeClaim(namePrefix: String, + namespace: String, + s3StorageClassName: String, + s3Endpoint: String, + s3Bucket: String, + s3Path: String, + s3AccessKeyId: String, + s3SecretAccessKey: String, + s3Capacity: Int): Task[String] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kubernetes_s3/LiveKubernetesS3.scala b/jvm/src/main/scala/com/harana/modules/kubernetes_s3/LiveKubernetesS3.scala new file mode 100644 index 0000000..7192048 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kubernetes_s3/LiveKubernetesS3.scala @@ -0,0 +1,76 @@ +package com.harana.modules.kubernetes_s3 + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.kubernetes.Kubernetes +import io.circe.syntax._ +import skuber.PersistentVolume.AccessMode +import skuber.Resource.Quantity +import skuber.Volume.GenericVolumeSource +import skuber._ +import skuber.json.format._ +import zio.{ZIO, ZLayer} + +object LiveKubernetesS3 { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + kubernetes <- ZIO.service[Kubernetes] + logger <- ZIO.service[Logger] + micrometer 
<- ZIO.service[Micrometer] + } yield LiveKubernetesS3(config, kubernetes, logger, micrometer) + } +} + +case class LiveKubernetesS3(config: Config, kubernetes: Kubernetes, logger: Logger, micrometer: Micrometer) extends KubernetesS3 { + + def createPersistentVolumeClaim(namePrefix: String, + namespace: String, + s3StorageClassName: String, + s3Endpoint: String, + s3Bucket: String, + s3Path: String, + s3AccessKeyId: String, + s3SecretAccessKey: String, + s3Capacity: Int) = + for { + _ <- logger.info(s"Creating volume with prefix: $namePrefix") + + client <- kubernetes.newClient + + _ <- kubernetes.create[Secret](client, namespace, + Secret(metadata = ObjectMeta(name = s"$namePrefix-secret", namespace = namespace), + data = Map("accessKeyID" -> s3AccessKeyId.getBytes, "secretAccessKey" -> s3SecretAccessKey.getBytes, "endpoint" -> s3Endpoint.getBytes) + )) + + _ <- kubernetes.create[PersistentVolume](client, namespace, + PersistentVolume(metadata = ObjectMeta(name = s"$namePrefix-pv", namespace = namespace), + spec = Some(PersistentVolume.Spec( + accessModes = List(AccessMode.ReadWriteMany), + capacity = Map(Resource.storage -> Quantity(s"${s3Capacity}Gi")), + claimRef = Some(ObjectReference(name = s"$namePrefix-pvc", namespace = namespace)), +// storageClassName = Some(s3StorageClassName), + source = GenericVolumeSource( + Map( + "csi" -> Map( + "driver" -> s"ru.yandex.s3.csi".asJson, + "controllerPublishSecretRef" -> Map("name" -> s"$namePrefix-secret", "namespace" -> namespace).asJson, + "nodePublishSecretRef" -> Map("name" -> s"$namePrefix-secret", "namespace" -> namespace).asJson, + "nodeStageSecretRef" -> Map("name" -> s"$namePrefix-secret", "namespace" -> namespace).asJson, + "volumeAttributes" -> Map("capacity" -> s"${s3Capacity}Gi", "mounter" -> "geesfs").asJson, + "volumeHandle" -> s"$s3Bucket/$s3Path".asJson + ).asJson + ).asJson.noSpaces + ) + )))) + + _ <- kubernetes.create[PersistentVolumeClaim](client, namespace, + PersistentVolumeClaim(metadata = ObjectMeta(name = s"$namePrefix-pvc", namespace = namespace), + spec = Some(PersistentVolumeClaim.Spec( + accessModes = List(AccessMode.ReadWriteMany), + resources = Some(Resource.Requirements(requests = Map(Resource.storage -> Quantity(s"${s3Capacity}Gi")))) + )) + )) + } yield s"$namePrefix-pvc" +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ldap/Ldap.scala b/jvm/src/main/scala/com/harana/modules/ldap/Ldap.scala new file mode 100644 index 0000000..4a101bf --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ldap/Ldap.scala @@ -0,0 +1,15 @@ +package com.harana.modules.ldap + +import zio.Task +import zio.macros.accessible + +@accessible +trait Ldap { + + def createUser(emailAddress: String, password: String): Task[Unit] + + def deleteUser(emailAddress: String): Task[Unit] + + def setPassword(emailAddress: String, password: String): Task[Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ldap/LiveLdap.scala b/jvm/src/main/scala/com/harana/modules/ldap/LiveLdap.scala new file mode 100644 index 0000000..d8dfcdc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ldap/LiveLdap.scala @@ -0,0 +1,92 @@ +package com.harana.modules.ldap + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.unboundid.ldap.sdk._ +import zio.{Task, ZIO, ZLayer} + +object LiveLdap { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] 
+ micrometer <- ZIO.service[Micrometer] + } yield LiveLdap(config, logger, micrometer) + } +} + +case class LiveLdap(config: Config, logger: Logger, micrometer: Micrometer) extends Ldap { + + def createUser(emailAddress: String, password: String): Task[Unit] = + for { + connection <- getConnection + entry = new Entry(dn(emailAddress)) { addAttribute("userPassword", password) } + result <- ZIO.async { (cb: Task[Unit] => Unit) => + connection.asyncAdd(new AddRequest(entry), new AsyncResultListener { + override def ldapResultReceived(requestID: AsyncRequestID, ldapResult: LDAPResult): Unit = { + if (ldapResult.getResultCode.equals(ResultCode.SUCCESS)) cb(ZIO.unit) + else cb(ZIO.fail(new Exception(ldapResult.getDiagnosticMessage))) + } + }) + } + } yield result + + + def deleteUser(emailAddress: String): Task[Unit] = + for { + connection <- getConnection + result <- ZIO.async { (cb: Task[Unit] => Unit) => + connection.asyncDelete(new DeleteRequest(dn(emailAddress)), new AsyncResultListener { + override def ldapResultReceived(requestID: AsyncRequestID, ldapResult: LDAPResult): Unit = { + if (ldapResult.getResultCode.equals(ResultCode.SUCCESS)) cb(ZIO.unit) + else cb(ZIO.fail(new Exception(ldapResult.getDiagnosticMessage))) + } + }) + } + } yield result + + + def setPassword(emailAddress: String, password: String): Task[Unit] = + for { + connection <- getConnection + _ <- bind(connection) + modifyRequest = new ModifyRequest(dn(emailAddress), new Modification(ModificationType.REPLACE, "userPassword", password)) + result <- ZIO.async { (cb: Task[Unit] => Unit) => + connection.asyncModify(modifyRequest, new AsyncResultListener { + override def ldapResultReceived(requestID: AsyncRequestID, ldapResult: LDAPResult): Unit = { + if (ldapResult.getResultCode.equals(ResultCode.SUCCESS)) cb(ZIO.unit) + else cb(ZIO.fail(new Exception(ldapResult.getDiagnosticMessage))) + } + }) + } + } yield () + + + private def bind(connection: LDAPConnection): Task[Unit] = + for { + bindUsername <- config.secret("ldap-bind-username") + bindPassword <- config.secret("ldap-bind-password") + bindTimeout <- config.long("auth.ldap.bindTimeout") + bindRequest <- ZIO.succeed { + val br = new SimpleBindRequest(s"cn=$bindUsername", bindPassword) + br.setResponseTimeoutMillis(bindTimeout) + br + } + _ <- ZIO.attempt(connection.bind(bindRequest)) + } yield () + + + private def getConnection: Task[LDAPConnection] = + for { + host <- config.secret("ldap-bind-host") + port <- config.int("auth.ldap.port") + connectTimeout <- config.int("auth.ldap.connectTimeout") + connection = new LDAPConnection() + _ <- ZIO.attempt(connection.connect(host, port, connectTimeout)) + } yield connection + + + private def dn(username: String) = + s"uid=$username,dc=harana,dc=com" +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/meilisearch/LiveMeilisearch.scala b/jvm/src/main/scala/com/harana/modules/meilisearch/LiveMeilisearch.scala new file mode 100644 index 0000000..e419c2f --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/meilisearch/LiveMeilisearch.scala @@ -0,0 +1,81 @@ +package com.harana.modules.meilisearch + +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.meilisearch.sdk.model.TaskInfo +import com.meilisearch.sdk.{Client, Config, SearchRequest, TasksHandler} +import io.circe.Encoder +import io.circe.syntax.EncoderOps +import zio.{Clock, Schedule, ZIO, ZLayer, durationInt} +import zio.Duration._ + +import scala.jdk.CollectionConverters._ + 
+object LiveMeilisearch { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveMeilisearch(config, logger, micrometer) + } +} + +case class LiveMeilisearch(config: Config, logger: Logger, micrometer: Micrometer) extends Meilisearch { + + def newClient(host: String, port: Option[Long] = None, apiKey: Option[String] = None) = + ZIO.attempt { + val url = s"http://$host:${port.getOrElse(7000)}" + new Client(if (apiKey.isEmpty) new Config(url) else new Config(url, apiKey.get)) + } + + + def createIndex(client: Client, index: String, primaryKey: Option[String] = None) = + executeTask(client, if (primaryKey.isEmpty) client.createIndex(index) else client.createIndex(index, primaryKey.get)) + + + def deleteIndex(client: Client, index: String) = + executeTask(client, client.deleteIndex(index)) + + + def addObjects[T](client: Client, index: String, objects: List[T], inBatches: Boolean = false)(implicit encoder: Encoder[T]) = + executeTask(client, client.index(index).addDocuments(objects.asJson.noSpaces)) + + + def updateObjects[T](client: Client, index: String, objects: List[T], inBatches: Boolean = false)(implicit encoder: Encoder[T]) = + executeTask(client, client.index(index).updateDocuments(objects.asJson.noSpaces)) + + + def deleteObjects(client: Client, index: String, ids: List[String]) = + executeTask(client, client.index(index).deleteDocuments(ids.asJava)) + + + def deleteAllObjects(client: Client, index: String) = + executeTask(client, client.index(index).deleteAllDocuments()) + + + def search(client: Client, index: String, query: String) = + ZIO.attempt(client.index(index).search(query).getHits.asScala.map(_.asScala.toMap).toList) + + + def search(client: Client, index: String, request: SearchRequest) = + ZIO.attempt(client.index(index).search(request).getHits.asScala.map(_.asScala.toMap).toList) + + + def stopWords(client: Client, index: String) = + ZIO.attempt(client.index(index).getStopWordsSettings.toList) + + + def updateStopWords(client: Client, index: String, stopWords: List[String]) = + ZIO.attempt(client.index(index).updateStopWordsSettings(stopWords.toArray)) + + + private def executeTask(client: Client, fn: => TaskInfo) = + for { + id <- ZIO.succeed(fn.getTaskUid) + schedule = Schedule.fixed(50 milliseconds) && Schedule.recurUntil[String](status => + status != TasksHandler.SUCCEEDED && status != TasksHandler.FAILED + ) + _ <- ZIO.attempt(client.getTask(id).getStatus).repeat(schedule) + } yield () +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/meilisearch/Meilisearch.scala b/jvm/src/main/scala/com/harana/modules/meilisearch/Meilisearch.scala new file mode 100644 index 0000000..8656819 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/meilisearch/Meilisearch.scala @@ -0,0 +1,33 @@ +package com.harana.modules.meilisearch + +import com.meilisearch.sdk.{Client, SearchRequest} +import io.circe.Encoder +import zio.Task +import zio.macros.accessible + +@accessible +trait Meilisearch { + + def newClient(host: String, port: Option[Long] = None, apiKey: Option[String] = None): Task[Client] + + def createIndex(client: Client, index: String, primaryKey: Option[String] = None): Task[Unit] + + def deleteIndex(client: Client, index: String): Task[Unit] + + def addObjects[T](client: Client, index: String, objects: List[T], inBatches: Boolean = false)(implicit encoder: Encoder[T]): Task[Unit] + + def updateObjects[T](client: Client, index: String, objects: 
List[T], inBatches: Boolean = false)(implicit encoder: Encoder[T]): Task[Unit] + + def deleteObjects(client: Client, index: String, ids: List[String]): Task[Unit] + + def deleteAllObjects(client: Client, index: String): Task[Unit] + + def search(client: Client, index: String, query: String): Task[List[Map[String, AnyRef]]] + + def search(client: Client, index: String, request: SearchRequest): Task[Unit] + + def stopWords(client: Client, index: String): Task[List[String]] + + def updateStopWords(client: Client, index: String, stopWords: List[String]): Task[Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/mixpanel/LiveMixpanel.scala b/jvm/src/main/scala/com/harana/modules/mixpanel/LiveMixpanel.scala new file mode 100644 index 0000000..9681b60 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/mixpanel/LiveMixpanel.scala @@ -0,0 +1,166 @@ +package com.harana.modules.mixpanel + + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.mixpanel.mixpanelapi.{ClientDelivery, MessageBuilder, MixpanelAPI} +import org.json.{JSONArray, JSONObject} +import zio.{ZIO, ZLayer} + +import java.util.UUID +import scala.jdk.CollectionConverters._ + +object LiveMixpanel { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveMixpanel(config, logger, micrometer) + } +} + +case class LiveMixpanel(config: Config, logger: Logger, micrometer: Micrometer) extends Mixpanel { + + private val api = new MixpanelAPI() + private val messageBuilder = config.secret("mixpanel-token").map(t => new MessageBuilder(t)) + + def append(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.append(id.toString, toJson(properties), toJson(modifiers))) + } yield r + + + def delete(id: UUID, modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.delete(id.toString, toJson(modifiers))) + } yield r + + + def event(id: UUID, name: String, properties: Map[String, Object]) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.event(id.toString, name, toJson(properties))) + } yield r + + + def groupDelete(groupKey: String, groupId: String, modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.groupDelete(groupKey, groupId, toJson(modifiers))) + } yield r + + + def groupMessage(groupKey: String, groupId: String, actionType: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.groupMessage(groupKey, groupId, actionType, toJson(properties), toJson(modifiers))) + } yield r + + + def groupRemove(groupKey: String, groupId: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.groupRemove(groupKey, groupId, toJson(properties), toJson(modifiers))) + } yield r + + + def groupSet(groupKey: String, groupId: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.groupSet(groupKey, groupId, toJson(properties), toJson(modifiers))) + } yield r + + + def groupSetOnce(groupKey: String, groupId: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- 
ZIO.attempt(mb.groupSetOnce(groupKey, groupId, toJson(properties), toJson(modifiers))) + } yield r + + + def groupUnion(groupKey: String, groupId: String, properties: Map[String, JSONArray], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.groupUnion(groupKey, groupId, properties.asJava, toJson(modifiers))) + } yield r + + + def groupUnset(groupKey: String, groupId: String, propertyNames: List[String], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.groupUnset(groupKey, groupId, propertyNames.asJava, toJson(modifiers))) + } yield r + + + def increment(id: UUID, properties: Map[String, Long], modifiers: Map[String, String] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.increment(id.toString, properties.view.mapValues(Long.box).toMap.asJava, toJson(modifiers))) + } yield r + + + def peopleMessage(id: UUID, actionType: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.peopleMessage(id.toString, actionType, toJson(properties), toJson(modifiers))) + } yield r + + + def remove(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.remove(id.toString, toJson(properties), toJson(modifiers))) + } yield r + + + def send(messages: List[JSONObject]) = { + val delivery = new ClientDelivery() + messages.foreach(delivery.addMessage) + ZIO.from(api.deliver(delivery)).unit + } + + def set(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.set(id.toString, toJson(properties), toJson(modifiers))) + } yield r + + + def setOnce(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.setOnce(id.toString, toJson(properties), toJson(modifiers))) + } yield r + + + def trackCharge(id: UUID, amount: Double, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.trackCharge(id.toString, amount, toJson(properties), toJson(modifiers))) + } yield r + + + def union(id: UUID, properties: Map[String, JSONArray], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.union(id.toString, properties.asJava, toJson(modifiers))) + } yield r + + + def unset(id: UUID, propertyNames: List[String], modifiers: Map[String, String] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.unset(id.toString, propertyNames.asJava, toJson(modifiers))) + } yield r + + + private def toJson(properties: Map[String, Object]) = { + val json = new JSONObject + properties.foreach(p => json.put(p._1, p._2)) + json + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/mixpanel/Mixpanel.scala b/jvm/src/main/scala/com/harana/modules/mixpanel/Mixpanel.scala new file mode 100644 index 0000000..662f9ad --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/mixpanel/Mixpanel.scala @@ -0,0 +1,48 @@ +package com.harana.modules.mixpanel + +import org.json.{JSONArray, JSONObject} +import zio.Task +import zio.macros.accessible + +import java.util.UUID + +@accessible +trait Mixpanel { + def append(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def delete(id: UUID, modifiers: Map[String, Object] = Map()): 
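// --- Brief, hypothetical sketch of the Mixpanel module: build a sign-up event and a profile
// update for the same user, then deliver both with send (the builder methods above only
// construct messages, they do not transmit them). Property names and values are illustrative;
// LiveMixpanel.layer and the "mixpanel-token" secret are assumed to be configured.
object MixpanelExample {
  import com.harana.modules.mixpanel.Mixpanel
  import java.util.UUID

  val userId = UUID.randomUUID()

  val program =
    for {
      signUp  <- Mixpanel.event(userId, "signed-up", Map("plan" -> "free"))
      profile <- Mixpanel.set(userId, Map("$email" -> "alice@example.com", "plan" -> "free"))
      _       <- Mixpanel.send(List(signUp, profile))
    } yield ()
}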
Task[JSONObject] + + def event(id: UUID, name: String, properties: Map[String, Object]): Task[JSONObject] + + def groupDelete(groupKey: String, groupId: String, modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def groupMessage(groupKey: String, groupId: String, actionType: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def groupRemove(groupKey: String, groupId: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def groupSet(groupKey: String, groupId: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def groupSetOnce(groupKey: String, groupId: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def groupUnion(groupKey: String, groupId: String, properties: Map[String, JSONArray], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def groupUnset(groupKey: String, groupId: String, propertyNames: List[String], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def increment(id: UUID, properties: Map[String, Long], modifiers: Map[String, String] = Map()): Task[JSONObject] + + def peopleMessage(id: UUID, actionType: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def remove(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def send(messages: List[JSONObject]): Task[Unit] + + def set(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def setOnce(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def trackCharge(id: UUID, amount: Double, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def union(id: UUID, properties: Map[String, JSONArray], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def unset(id: UUID, propertyNames: List[String], modifiers: Map[String, String] = Map()): Task[JSONObject] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ognl/LiveOgnl.scala b/jvm/src/main/scala/com/harana/modules/ognl/LiveOgnl.scala new file mode 100644 index 0000000..2adcafd --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ognl/LiveOgnl.scala @@ -0,0 +1,32 @@ +package com.harana.modules.ognl + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.ognl.models.{OgnlMemberAccess, OgnlObjectPropertyAccessor} +import ognl.{DefaultClassResolver, OgnlRuntime, Ognl => jOgnl} +import zio.{Task, ZIO, ZLayer} + +import scala.jdk.CollectionConverters._ + +object LiveOgnl { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveOgnl(config, logger, micrometer) + } +} + +case class LiveOgnl(config: Config, logger: Logger, micrometer: Micrometer) extends Ognl { + + OgnlRuntime.setPropertyAccessor(classOf[Object], new OgnlObjectPropertyAccessor()) + + def render(expression: String, context: Map[String, Any]): Task[Any] = { + val ognlContext = jOgnl.createDefaultContext(context.asJava, new OgnlMemberAccess, new DefaultClassResolver, null) + val ognlExpression = jOgnl.parseExpression(expression) + ZIO.succeed(jOgnl.getValue(ognlExpression, ognlContext, 
context.asJava)) + } + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ognl/Ognl.scala b/jvm/src/main/scala/com/harana/modules/ognl/Ognl.scala new file mode 100644 index 0000000..6434919 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ognl/Ognl.scala @@ -0,0 +1,9 @@ +package com.harana.modules.ognl + +import zio.Task +import zio.macros.accessible + +@accessible +trait Ognl { + def render(expression: String, context: Map[String, Any]): Task[Any] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ognl/models/OgnlMemberAccess.java b/jvm/src/main/scala/com/harana/modules/ognl/models/OgnlMemberAccess.java new file mode 100644 index 0000000..57429fc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ognl/models/OgnlMemberAccess.java @@ -0,0 +1,34 @@ +package com.harana.modules.ognl.models; + +import ognl.MemberAccess; + +import java.lang.reflect.AccessibleObject; +import java.lang.reflect.Member; +import java.lang.reflect.Modifier; +import java.util.Map; + +public class OgnlMemberAccess implements MemberAccess { + + @Override + public Object setup(Map context, Object target, Member member, String propertyName) { + Object result = null; + if (isAccessible(context, target, member, propertyName)) { + AccessibleObject accessible = (AccessibleObject) member; + if (!accessible.isAccessible()) { + result = Boolean.FALSE; + accessible.setAccessible(true); + } + } + return result; + } + + @Override + public void restore(Map context, Object target, Member member, String propertyName, Object state) { + if (state != null) ((AccessibleObject) member).setAccessible(((Boolean) state)); + } + + @Override + public boolean isAccessible(Map context, Object target, Member member, String propertyName) { + return Modifier.isPublic(member.getModifiers()); + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ognl/models/OgnlObjectPropertyAccessor.scala b/jvm/src/main/scala/com/harana/modules/ognl/models/OgnlObjectPropertyAccessor.scala new file mode 100644 index 0000000..11b29c1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ognl/models/OgnlObjectPropertyAccessor.scala @@ -0,0 +1,79 @@ +package com.harana.modules.ognl.models + +import ognl.{ObjectPropertyAccessor, OgnlContext, OgnlException, OgnlRuntime} + +import java.beans.IntrospectionException +import java.util +import scala.jdk.CollectionConverters._ + +class OgnlObjectPropertyAccessor extends ObjectPropertyAccessor { + + override def getPossibleProperty(context: util.Map[_, _], target: scala.Any, name: String): AnyRef = { + if (isCaseClass(target)) { + getCaseClassFieldValueByName(target, name) match { + case Some(x) => return x.asInstanceOf[AnyRef] + case None => + } + } + + var result: Object = null + val ognlContext: OgnlContext = context.asInstanceOf[OgnlContext] + + try { + result = OgnlRuntime.getMethodValue(ognlContext, target, name, true) + if (result == OgnlRuntime.NotFound) { + result = OgnlRuntime.getFieldValue(ognlContext, target, name, true) + } + } + catch { + case ex: IntrospectionException => throw new OgnlException(name, ex) + case ex: OgnlException => throw ex + case ex: Exception => throw new OgnlException(name, ex) + } + + result + } + + def getCaseClassFieldValueByName(targetClass: Any, fieldName: String): Option[Any] = { + val productInstance = targetClass.asInstanceOf[Product] + val fieldsNameToValueMap = productInstance.getClass.getDeclaredFields.map(_.getName) + .zip(productInstance.productIterator).toMap + 
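// --- Illustrative sketch of the Ognl module this accessor supports: render evaluates an OGNL
// expression against a Map context, and the case-class handling above lets expressions reach
// into Scala case class fields directly. The User/Order types and the expression are invented
// for the example.
object OgnlExample {
  import com.harana.modules.ognl.Ognl

  case class User(name: String, age: Int)
  case class Order(id: String, user: User, items: List[String])

  val context = Map(
    "order" -> Order("o-1", User("Alice", 42), List("book", "pen"))
  )

  // Navigates order -> user -> name via getCaseClassFieldValueByName; expected result: "Alice".
  val program = Ognl.render("order.user.name", context)
}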
fieldsNameToValueMap.get(fieldName) + } + + def isCaseClass(instance: Any) = { + import reflect.runtime.universe._ + val typeMirror = runtimeMirror(instance.getClass.getClassLoader) + val instanceMirror = typeMirror.reflect(instance) + val symbol = instanceMirror.symbol + symbol.isCaseClass + } + + override def setPossibleProperty(context: util.Map[_, _], target: scala.Any, name: String, value: scala.Any): AnyRef = super.setPossibleProperty(context, target, name, value) + + override def hasGetProperty(context: OgnlContext, target: scala.Any, oname: scala.Any): Boolean = super.hasGetProperty(context, target, oname) + + override def hasGetProperty(context: util.Map[_, _], target: scala.Any, oname: scala.Any): Boolean = super.hasGetProperty(context, target, oname) + + override def hasSetProperty(context: OgnlContext, target: scala.Any, oname: scala.Any): Boolean = super.hasSetProperty(context, target, oname) + + override def hasSetProperty(context: util.Map[_, _], target: scala.Any, oname: scala.Any): Boolean = super.hasSetProperty(context, target, oname) + + override def getProperty(context: util.Map[_, _], target: scala.Any, oname: scala.Any): AnyRef = { + val result = super.getProperty(context, target, oname) + result match { + case c: List[_] => c.asJava + case c: Map[_, _] => c.asJava + case c: Set[_] => c.asJava + case _ => result + } + } + + override def setProperty(context: util.Map[_, _], target: scala.Any, oname: scala.Any, value: scala.Any): Unit = super.setProperty(context, target, oname, value) + + override def getPropertyClass(context: OgnlContext, target: scala.Any, index: scala.Any): Class[_] = super.getPropertyClass(context, target, index) + + override def getSourceAccessor(context: OgnlContext, target: scala.Any, index: scala.Any): String = super.getSourceAccessor(context, target, index) + + override def getSourceSetter(context: OgnlContext, target: scala.Any, index: scala.Any): String = super.getSourceSetter(context, target, index) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ohc/LiveOHC.scala b/jvm/src/main/scala/com/harana/modules/ohc/LiveOHC.scala new file mode 100644 index 0000000..01d2a46 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ohc/LiveOHC.scala @@ -0,0 +1,85 @@ +package com.harana.modules.ohc + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import org.caffinitas.ohc.{CacheLoader, DirectValueAccess, OHCache, OHCacheBuilder} +import zio.{ZIO, ZLayer} + +import java.nio.ByteBuffer +import scala.jdk.CollectionConverters._ + +object LiveOHC { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveOHC(config, logger, micrometer) + } +} + +case class LiveOHC(config: Config, logger: Logger, micrometer: Micrometer) extends OHC { + + def newCache[K, V](hashTableSize: Option[Int] = None, + chunkSize: Option[Int] = None, + capacity: Option[Long] = None, + segmentCount: Option[Int] = None) = { + val builder = OHCacheBuilder.newBuilder[K, V]() + if (hashTableSize.nonEmpty) builder.hashTableSize(hashTableSize.get) + if (chunkSize.nonEmpty) builder.chunkSize(chunkSize.get) + if (capacity.nonEmpty) builder.capacity(capacity.get) + if (segmentCount.nonEmpty) builder.segmentCount(segmentCount.get) + ZIO.succeed(builder.build()) + } + + def put[K, V](cache: OHCache[K, V], key: K, value: V, expireAt: Option[Long] = None) = + ZIO.succeed(if 
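// --- Minimal sketch of the OHC off-heap cache module: build a cache, store and read a value,
// and check membership. Key/value serializers are a real OHC concern that this sketch glosses
// over (OHCacheBuilder normally needs CacheSerializers configured), so treat it as illustrative
// wiring of the module API only; the key and capacity values are made up.
object OHCExample {
  import com.harana.modules.ohc.OHC
  import zio.ZIO

  val program =
    for {
      cache <- OHC.newCache[String, String](capacity = Some(64L * 1024 * 1024))
      _     <- OHC.put(cache, "greeting", "hello")
      hit   <- OHC.get(cache, "greeting")
      known <- OHC.containsKey(cache, "greeting")
      _     <- ZIO.debug(s"value=$hit present=$known")
    } yield ()
}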
(expireAt.nonEmpty) cache.put(key, value, expireAt.get) else cache.put(key, value)) + + + def putIfAbsent[K, V](cache: OHCache[K, V], key: K, value: V, expireAt: Option[Long] = None) = + ZIO.succeed(if (expireAt.nonEmpty) cache.putIfAbsent(key, value, expireAt.get) else cache.putIfAbsent(key, value)) + + + def putAll[K, V](cache: OHCache[K, V], values: Map[K, V]) = + ZIO.succeed(cache.putAll(values.asJava)) + + + def addOrReplace[K, V](cache: OHCache[K, V], key: K, oldValue: V, newValue: V, expireAt: Option[Long] = None) = + ZIO.succeed(if (expireAt.nonEmpty) cache.addOrReplace(key, oldValue, newValue, expireAt.get) else cache.addOrReplace(key, oldValue, newValue)) + + + def remove[K, V](cache: OHCache[K, V], key: K) = + ZIO.succeed(cache.remove(key)) + + + def removeAll[K, V](cache: OHCache[K, V], keys: Set[K]) = + ZIO.succeed(cache.removeAll(keys.asJava)) + + + def clear[K, V](cache: OHCache[K, V]) = + ZIO.succeed(cache.clear()) + + + def get[K, V](cache: OHCache[K, V], key: K) = + ZIO.succeed(cache.get(key)) + + +// FIXME +// def getAsBytes[K, V](cache: OHCache[K, V], key: K, updateLRU: Boolean = false) = +// ZIO.acquireReleaseWith[OHCache[K, V], DirectValueAccess, ByteBuffer](ZIO.succeed(cache.getDirect(key, updateLRU)), d => ZIO.succeed(d.close()), d => ZIO.succeed(d.buffer())) + + + def getWithLoader[K, V](cache: OHCache[K, V], key: K, loader: CacheLoader[K, V], expireAt: Option[Long] = None) = + ZIO.fromFutureJava( + if (expireAt.nonEmpty) cache.getWithLoaderAsync(key, loader, expireAt.get) else cache.getWithLoaderAsync(key, loader) + ).orDie + + + def containsKey[K, V](cache: OHCache[K, V], key: K) = + ZIO.succeed(cache.containsKey(key)) + + + def size[K, V](cache: OHCache[K, V]) = + ZIO.succeed(cache.size()) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ohc/OHC.scala b/jvm/src/main/scala/com/harana/modules/ohc/OHC.scala new file mode 100644 index 0000000..dc3abc4 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ohc/OHC.scala @@ -0,0 +1,39 @@ +package com.harana.modules.ohc + +import org.caffinitas.ohc.{CacheLoader, OHCache} +import zio.UIO +import zio.macros.accessible + +import java.nio.ByteBuffer + +@accessible +trait OHC { + def newCache[K, V](hashTableSize: Option[Int] = None, + chunkSize: Option[Int] = None, + capacity: Option[Long] = None, + segmentCount: Option[Int] = None): UIO[OHCache[K, V]] + + def put[K, V](cache: OHCache[K, V], key: K, value: V, expireAt: Option[Long] = None): UIO[Boolean] + + def putIfAbsent[K, V](cache: OHCache[K, V], key: K, value: V, expireAt: Option[Long] = None): UIO[Boolean] + + def putAll[K, V](cache: OHCache[K, V], values: Map[K, V]): UIO[Unit] + + def addOrReplace[K, V](cache: OHCache[K, V], key: K, oldValue: V, newValue: V, expireAt: Option[Long] = None): UIO[Boolean] + + def remove[K, V](cache: OHCache[K, V], key: K): UIO[Boolean] + + def removeAll[K, V](cache: OHCache[K, V], keys: Set[K]): UIO[Unit] + + def clear[K, V](cache: OHCache[K, V]): UIO[Unit] + + def get[K, V](cache: OHCache[K, V], key: K): UIO[V] + +// def getAsBytes[K, V](cache: OHCache[K, V], key: K, updateLRU: Boolean = false): UIO[ByteBuffer] + + def getWithLoader[K, V](cache: OHCache[K, V], key: K, loader: CacheLoader[K, V], expiresAt: Option[Long] = None): UIO[V] + + def containsKey[K, V](cache: OHCache[K, V], key: K): UIO[Boolean] + + def size[K, V](cache: OHCache[K, V]): UIO[Long] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/plugin/LivePlugin.scala 
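The OHC module above is a thin ZIO wrapper over org.caffinitas.ohc. A minimal usage sketch, assuming the LiveOHC layer is provided and that key/value serializers are configured on the builder (newCache currently leaves them unset); names and sizes are illustrative:

```
import com.harana.modules.ohc.OHC
import zio._

// Build an off-heap cache, write one entry and read it back.
val cacheDemo: URIO[OHC, Option[String]] =
  for {
    ohc   <- ZIO.service[OHC]
    cache <- ohc.newCache[String, String](capacity = Some(64L * 1024 * 1024))
    _     <- ohc.put(cache, "greeting", "hello")
    value <- ohc.get(cache, "greeting")
  } yield Option(value)   // wrap the possibly-null result of a cache miss
```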
b/jvm/src/main/scala/com/harana/modules/plugin/LivePlugin.scala new file mode 100644 index 0000000..d55243d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/plugin/LivePlugin.scala @@ -0,0 +1,170 @@ +package com.harana.modules.plugin + +import com.harana.models.{PluginInfo, PluginServiceInfo} +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.ohc.OHC +import com.harana.modules.plugin.LivePlugin._ +import com.harana.modules.plugin.models.PluginError +import com.harana.sdk.shared.plugin.Service +import com.harana.sdk.shared.plugin.Service.{ServiceId => PluginServiceId} +import org.apache.felix.framework.Felix +import org.osgi.framework.Constants +import zio._ + +import java.io.File +import java.net.URL +import java.util +import scala.reflect.ClassTag + +object LivePlugin { + + val scalazVersion = "7.2.7" + val scalaVersion = scala.util.Properties.versionNumberString + + val systemPackages = + "scala;version=" + scalaVersion + + ",scala.annotation;version=" + scalaVersion + + ",scala.collection;version=" + scalaVersion + + ",scala.collection.convert;version=" + scalaVersion + + ",scala.collection.generic;version=" + scalaVersion + + ",scala.collection.immutable;version=" + scalaVersion + + ",scala.collection.mutable;version=" + scalaVersion + + ",scala.collection.parallel;version=" + scalaVersion + + ",scala.collection.parallel.immutable;version=" + scalaVersion + + ",scala.collection.parallel.mutable;version=" + scalaVersion + + ",scala.collection.script;version=" + scalaVersion + + ",scala.concurrent;version=" + scalaVersion + + ",scala.concurrent.duration;version=" + scalaVersion + + ",scala.io;version=" + scalaVersion + + ",scala.math;version=" + scalaVersion + + ",scala.reflect.api;version=" + scalaVersion + + ",scala.reflect.internal;version=" + scalaVersion + + ",scala.reflect.internal.util;version=" + scalaVersion + + ",scala.reflect;version=" + scalaVersion + + ",scala.reflect.macros.blackbox;version=" + scalaVersion + + ",scala.reflect.macros.whitebox;version=" + scalaVersion + + ",scala.reflect.macros.contexts;version=" + scalaVersion + + ",scala.reflect.macros;version=" + scalaVersion + + ",scala.runtime;version=" + scalaVersion + + ",scala.runtime.java8;version=" + scalaVersion + + ",scala.tools.nsc;version=" + scalaVersion + + ",scala.tools.nsc.ast;version=" + scalaVersion + + ",scala.tools.nsc.typechecker;version=" + scalaVersion + + ",scala.sys;version=" + scalaVersion + + ",scala.util;version=" + scalaVersion + + ",scala.util.control;version=" + scalaVersion + + ",scala.util.hashing;version=" + scalaVersion + + ",scala.annotation;version=" + scalaVersion + + ",scala.util.matching;version=" + scalaVersion + + ",scala.xml;version=1.0.6" + + ",scalaz;version=" + scalazVersion + + ",scalaz.syntax;version=" + scalazVersion + + ",org.osgi.framework;version=1.6.0" + + ",org.osgi.service,services,utils,com.fasterxml.jackson,play,io.gatling.jsonpath,android.util,org.joda.convert,org.apache.felix.scr,org.slf4j,io.dropwizard.metrics.healthchecks" + + val sdkPackages = + "com.harana.sdk.components" + + ",com.harana.sdk.components.basic" + + ",com.harana.sdk.components.cards" + + ",com.harana.sdk.components.elements" + + ",com.harana.sdk.components.lists" + + ",com.harana.sdk.components.maps" + + ",com.harana.sdk.components.panels" + + ",com.harana.sdk.components.structure" + + ",com.harana.sdk.components.widgets" + + ",com.harana.sdk.models" + + 
",com.harana.sdk.parameters" + + ",com.harana.sdk.plugin" + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LivePlugin(config, logger, micrometer) + } +} + +case class LivePlugin(config: Config, logger: Logger, micrometer: Micrometer) extends Plugin { + + private val bundlesDirectory = config.string("plugins.bundleDir").map(f => new File(f)) + private val pluginsDirectory = config.string("plugins.pluginsDir").map(f => new File(f)) + private val cacheDirectory = config.string("plugins.cacheDir").map(f => new File(f)) + + private val bundleContext = + for { + bundlesDir <- bundlesDirectory + pluginsDir <- pluginsDirectory + props <- ZIO.succeed { + val props = new util.HashMap[String, String] + props.put(Constants.FRAMEWORK_SYSTEMPACKAGES_EXTRA, systemPackages + "," + sdkPackages) + props.put(Constants.FRAMEWORK_STORAGE_CLEAN, Constants.FRAMEWORK_STORAGE_CLEAN_ONFIRSTINIT) + props.put(Constants.FRAMEWORK_STORAGE, cacheDirectory.toString) + props.put("felix.shutdown.hook", "true") + props.put("felix.service.urlhandlers", "true") + props.put("felix.fileinstall.dir", pluginsDir.getAbsolutePath) + props.put("felix.fileinstall.noInitialDelay", "true") + props.put("felix.fileinstall.log.level", "4") + props + } + felix = new Felix(props) + _ <- ZIO.attempt(felix.start()).mapError(PluginError.Exception) + _ <- installSystemBundles(bundlesDir, felix.getBundleContext) + _ <- ZIO.attempt(felix.init()).mapError(PluginError.Exception) + } yield felix.getBundleContext + + + def findPlugins[T <: Service](implicit cmf: ClassTag[T]): IO[PluginError, List[PluginInfo]] = + for { + context <- bundleContext + plugins = context.getBundles.map(b => PluginInfo(b.getSymbolicName, "", b.getVersion.getMajor.toLong)).toList + } yield plugins + + + def findServices[T <: Service](implicit cmf: ClassTag[T]): IO[PluginError, Map[PluginServiceId, T]] = + for { + context <- bundleContext + references = context.getAllServiceReferences(cmf.runtimeClass.getName, null) + services <- { + if (references == null) ZIO.fail(PluginError.NoServicesFound) + else ZIO.succeed( + references.map { ref => + val id = ref.getProperty("id").asInstanceOf[PluginServiceId] + val service = context.getService(ref).asInstanceOf[T] + (id, service) + }.toMap + ) + } + } yield services + + + def findServiceInfos[T <: Service](implicit cmf: ClassTag[T]): IO[PluginError, List[PluginServiceInfo]] = + for { + services <- findServices + serviceInfos = services.map { case (serviceId, any) => PluginServiceInfo()}.toList + } yield serviceInfos + + + def getService[T <: Service](serviceId: PluginServiceId)(implicit cmf: ClassTag[T]): IO[PluginError, T] = + for { + services <- findServices(cmf) + service <- services.get(serviceId) match { + case Some(x) => ZIO.succeed(x) + case None => ZIO.fail(PluginError.NoServiceFound) + } + } yield service + + + def getResource(className: String, resourcePath: String): IO[PluginError, URL] = + for { + context <- bundleContext + resource <- context.getBundles + .find(_.getEntry(className.replace(".", "/") + ".class") != null) + .map(_.getEntry(resourcePath)) match { + case Some(x) => ZIO.succeed(x) + case None => ZIO.fail(PluginError.NoResourceFound) + } + } yield resource +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/plugin/Plugin.scala b/jvm/src/main/scala/com/harana/modules/plugin/Plugin.scala new file mode 100644 index 0000000..dedf56b --- /dev/null +++ 
b/jvm/src/main/scala/com/harana/modules/plugin/Plugin.scala @@ -0,0 +1,24 @@ +package com.harana.modules.plugin + +import com.harana.models.{PluginInfo, PluginServiceInfo} +import com.harana.modules.plugin.models.PluginError +import com.harana.sdk.shared.plugin.Service +import com.harana.sdk.shared.plugin.Service.ServiceId +import zio.IO +import zio.macros.accessible + +import java.net.URL +import scala.reflect.ClassTag + +@accessible +trait Plugin { + def findPlugins[T <: Service](implicit cmf: ClassTag[T]): IO[PluginError, List[PluginInfo]] + + def findServices[T <: Service](implicit cmf: ClassTag[T]): IO[PluginError, Map[ServiceId, T]] + + def findServiceInfos[T <: Service](implicit cmf: ClassTag[T]): IO[PluginError, List[PluginServiceInfo]] + + def getService[T <: Service](serviceId: ServiceId)(implicit cmf: ClassTag[T]): IO[PluginError, T] + + def getResource(className: String, fileName: String): IO[PluginError, URL] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/plugin/models/PluginError.scala b/jvm/src/main/scala/com/harana/modules/plugin/models/PluginError.scala new file mode 100644 index 0000000..27d650d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/plugin/models/PluginError.scala @@ -0,0 +1,9 @@ +package com.harana.modules.plugin.models + +sealed trait PluginError +object PluginError { + case object NoResourceFound extends PluginError + case object NoServicesFound extends PluginError + case object NoServiceFound extends PluginError + case class Exception(t: Throwable) extends PluginError +} diff --git a/jvm/src/main/scala/com/harana/modules/plugin/package.scala b/jvm/src/main/scala/com/harana/modules/plugin/package.scala new file mode 100644 index 0000000..b258a7e --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/plugin/package.scala @@ -0,0 +1,45 @@ +package com.harana.modules + +import com.harana.modules.plugin.models.PluginError +import org.osgi.framework.BundleContext +import zio.{IO, ZIO} + +import java.io.File +import java.nio.file.{Files, Path, Paths} + +package object plugin { + + def installPlugin(bundleContext: BundleContext, bundleLocation: String): IO[PluginError, Unit] = + ZIO.attempt { + bundleContext.installBundle(bundleLocation).start() + }.mapError(PluginError.Exception) + + def installSystemBundles(bundlesDirectory: File, bundleContext: BundleContext): IO[PluginError, Unit] = + for { + files <- ZIO.succeed(bundlesDirectory.listFiles.filter(_.isFile).filter(_.getName.endsWith("jar"))) + bundles <- ZIO.attempt(files.map(b => bundleContext.installBundle(s"file:${b.getAbsolutePath}"))).mapError(PluginError.Exception) + _ <- ZIO.attempt(bundles.foreach(_.start())).mapError(PluginError.Exception) + } yield () + + def uninstallPlugin(bundleContext: BundleContext, bundleLocation: String): IO[PluginError, Unit] = + ZIO.attempt { + bundleContext.getBundles + .filter(_.getLocation == bundleLocation) + .foreach { bundle => + bundle.uninstall() + } + }.mapError(PluginError.Exception) + + def removePlugin(pluginsDirectory: File, pluginName: String): IO[PluginError, Unit] = + ZIO.attempt { + pluginsDirectory.listFiles + .filter(_.isFile) + .filter(_.getName == pluginName) + .foreach(file => if (file.exists()) file.delete()) + }.mapError(PluginError.Exception) + + def copyPlugin(pluginsDirectory: File, filePath: String): IO[PluginError, Path] = + ZIO.attempt { + Files.copy(Paths.get(filePath), Paths.get(pluginsDirectory + "/" + new File(filePath).getName)) + }.mapError(PluginError.Exception) +} \ No newline at end of file 
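A minimal sketch of consuming the Plugin module from application code, assuming LivePlugin.layer and its Config/Logger/Micrometer dependencies are provided elsewhere; loadService and the log line are illustrative:

```
import com.harana.modules.plugin.Plugin
import com.harana.modules.plugin.models.PluginError
import com.harana.sdk.shared.plugin.Service
import zio._

// Look up a plugin-provided Service by id via the Plugin module.
def loadService(id: Service.ServiceId): ZIO[Plugin, PluginError, Service] =
  for {
    plugin  <- ZIO.service[Plugin]
    plugins <- plugin.findPlugins[Service]             // installed bundles
    _       <- ZIO.logDebug(s"Found ${plugins.size} plugins")
    service <- plugin.getService[Service](id)
  } yield service
```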
diff --git a/jvm/src/main/scala/com/harana/modules/projects/LiveProjects.scala b/jvm/src/main/scala/com/harana/modules/projects/LiveProjects.scala new file mode 100644 index 0000000..efd48f7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/LiveProjects.scala @@ -0,0 +1,351 @@ +package com.harana.modules.projects + +import com.harana.designer.backend.modules.projects.models.{Trigger, _} +import com.harana.modules.argo.events.EventSource._ +import com.harana.modules.argo.events.Rollout.{BlueGreen, Canary, Rollout, Strategy} +import com.harana.modules.argo.events.Sensor.{EventDependency, Http, K8SResource, Sensor, Subscription, TriggerTemplate} +import com.harana.modules.argo.events.Trigger.{K8SSource, K8STrigger} +import com.harana.modules.argo.events._ +import com.harana.modules.argo.workflows._ +import com.harana.modules.argo.{EnvironmentVariable, ObjectMetadata, Requests, Resources, VolumeMount, Container => ArgoContainer, Template => ArgoTemplate} +import com.harana.modules.buildpack.Buildpack +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.docker.Docker +import com.harana.modules.git.Git +import com.harana.modules.kubernetes.Kubernetes +import com.harana.modules.vertx.Vertx +import io.circe.yaml.parser +import io.scalaland.chimney.dsl._ +import org.apache.commons.io.FileUtils +import org.eclipse.jgit.api.{Git => JGit} +import zio.{Clock, _} + +import java.io.{File => JFile} +import java.nio.charset.Charset +import java.nio.file.Files +import java.text.SimpleDateFormat +import java.util.Date +import java.util.concurrent.atomic.AtomicReference +import scala.jdk.CollectionConverters._ + +object LiveProjects { + val layer = ZLayer { + for { + buildpack <- ZIO.service[Buildpack] + config <- ZIO.service[Config] + docker <- ZIO.service[Docker] + git <- ZIO.service[Git] + kubernetes <- ZIO.service[Kubernetes] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + vertx <- ZIO.service[Vertx] + } yield LiveProjects(buildpack, config, docker, git, kubernetes, logger, micrometer, vertx) + } +} + +case class LiveProjects(buildpack: Buildpack, + config: Config, + docker: Docker, + git: Git, + kubernetes: Kubernetes, + logger: Logger, + micrometer: Micrometer, + vertx: Vertx) extends Projects { + + private val projectsRepository = new AtomicReference[JGit]() + private val allProjects = new AtomicReference[Set[Project]](Set()) + private val allRepositories = new AtomicReference[Map[(Project, String), JGit]](Map()) + private val tempDirectory = Files.createTempDirectory("harana-projects").toFile + private val dateFormatter = new SimpleDateFormat("yyyy-MM-dd@HH:mm:ss") + + private val repositoryAuthConfig = + for { + username <- config.optString("projects.docker.repository.username") + password <- config.optString("projects.docker.repository.password") + identityToken <- config.optString("projects.docker.repository.identityToken") + registryToken <- config.optString("projects.docker.repository.registryToken") + authConfig = dockerAuthConfig(username, password, identityToken, registryToken) + } yield authConfig + + + def setup(namespace: String): Task[Unit] = + for { + authConfig <- repositoryAuthConfig + + _ <- logger.info("Creating Argo Events/Workflow CRDs") + client <- kubernetes.newClient + _ <- kubernetes.save(client, namespace, EventSource.crd) + _ <- kubernetes.save(client, namespace, Rollout.crd) + _ <- kubernetes.save(client, namespace, 
Sensor.crd) + _ <- kubernetes.save(client, namespace, Workflow.crd) + _ <- kubernetes.save(client, namespace, WorkflowTemplate.crd) + _ <- kubernetes.close(client) + + _ <- logger.info("Pulling Python/Scala build images") + pythonImage <- config.optString("projects.build.pythonImage") + scalaImage <- config.optString("projects.build.scalaImage") + _ <- ZIO.when(pythonImage.nonEmpty)(docker.pullImage(pythonImage.get)) + _ <- ZIO.when(scalaImage.nonEmpty)(docker.pullImage(scalaImage.get, authConfig)) + + _ <- logger.info("Setting default Buildpack builder") + defaultBuilder <- config.optString("projects.build.buildpack.defaultBuilder") + _ <- ZIO.when(defaultBuilder.nonEmpty)(buildpack.setDefaultBuilder(defaultBuilder.get)) + + _ <- logger.info("Cloning Projects Git repository") + _ <- cloneProjects + + } yield () + + + def startMonitoring(namespace: String): Task[Unit] = + for { + _ <- refreshProjects.repeat(Schedule.spaced(5.seconds)) + } yield () + + + def stopMonitoring(namespace: String): Task[Unit] = { + ZIO.unit + } + + private def cloneProjects: Task[Unit] = + for { + url <- config.string("projects.git.url") + branch <- config.string("projects.git.branch", "master") + username <- config.optString("projects.git.username") + password <- config.optString("projects.git.password") + oauthToken <- config.optString("projects.git.oauthToken") + gitRepository <- git.clone(url, tempDirectory, Some(branch), username, password, oauthToken) + _ = projectsRepository.set(gitRepository) + } yield () + + + private def refreshProjects: Task[Set[Project]] = + for { + hasChanged <- git.hasChanged(projectsRepository.get) + _ <- logger.debug(s"Refreshing Projects Git repository: ${if (hasChanged) "changed" else "not changed" }") + projects <- ZIO.ifZIO(ZIO.succeed(hasChanged))(changedProjects(projectsRepository.get), ZIO.attempt(Set[Project]())) + } yield projects + + + private def cloneRepository(project: Project, repository: Repository): Task[Unit] = + for { + branchTagOrCommit <- ZIO.succeed(repository.git.branch.orElse(repository.git.tag.orElse(repository.git.commit.orElse(None)))) + directory = new JFile(tempDirectory, s"$project/${repository.name}") + gitRepository <- git.clone(repository.git.url, directory, None, repository.git.username, repository.git.password, repository.git.oauthToken) + _ <- git.checkout(gitRepository, branchTagOrCommit.getOrElse("master")) + _ = allRepositories.set(allRepositories.get + ((project, repository.name) -> gitRepository)) + } yield () + + + private def changedProjects(projectsRepo: JGit): Task[Set[Project]] = + for { + foundProjects <- findProjects(projectsRepo.getRepository.getDirectory.getParentFile) + changedProjects = allProjects.get.filterNot(foundProjects) + _ <- logger.debug(s"Changed projects: ${changedProjects.map(_.title).mkString(", ")}").when(changedProjects.nonEmpty) + } yield changedProjects + + + private def findProjects(directory: JFile): Task[Set[Project]] = + for { + files <- ZIO.succeed(FileUtils.listFiles(directory, Array("yml"), true).asScala.toList) + _ <- logger.debug(s"Found files: ${files.map(_.getAbsolutePath).mkString(", ")}") + ymls <- ZIO.attempt(files.map(f => (f.getName, FileUtils.readFileToString(f, Charset.defaultCharset())))) + parsedProjects <- ZIO.foreach(ymls)(parseProject) + foundProjects = parsedProjects.filter(_.nonEmpty).map(_.get).toSet + _ <- logger.info(s"Found projects: ${foundProjects.map(_.title)}") + } yield foundProjects + + + private def parseProject(yml: (String, String)): UIO[Option[Project]] = + for { + json <- 
ZIO.succeed(parser.parse(yml._2)) + _ <- ZIO.when(json.isLeft)(logger.error(s"Failed to parse YAML: ${yml._1} due to error: ${json.left.get.getMessage}")) + _ <- ZIO.when(json.isRight && json.toOption.get.as[Project].isLeft)(logger.error(s"Failed to parse project: ${yml._1} due to error: ${json.toOption.get.as[Project].left.get.getMessage}")) + project = json.toOption.flatMap(_.as[Project].toOption) + } yield project + + + private def refreshRepositories(project: Project): Task[Set[Repository]] = + project.repositories match { + case Some(repositories) => + for { + _ <- logger.debug(s"Refreshing repositories for project: ${project.title}") + changedReposRef <- Ref.make(Set[Repository]()) + _ <- ZIO.foreach(repositories) { r => + if (allRepositories.get.contains((project, r.name))) + for { + gitRepo <- ZIO.succeed(allRepositories.get()((project, r.name))) + hasChanged <- git.hasChanged(gitRepo) + _ <- changedReposRef.getAndUpdate(_ + r).when(hasChanged) + } yield () + else + cloneRepository(project, r) *> changedReposRef.getAndUpdate(_ + r) + } + changedRepos <- changedReposRef.get + } yield changedRepos + case None => ZIO.attempt(Set()) + } + + + private def buildContainers(project: Project): Task[List[Boolean]] = + for { + _ <- logger.debug(s"Building containers for project: ${project.title}") + successes <- ZIO.foreachPar(project.containers) { c => + val imageName = s"${name(project.title)}_${name(c.name)}" + val date = dateFormatter.format(new Date()) + + if (c.auto.map(_.repository).nonEmpty) + for { + git <- ZIO.succeed(allRepositories.get()((project, c.docker.get.repository.get))) + success <- buildpack.build(s"$imageName:$date", git.getRepository.getDirectory).map(_.mkString(",").contains("ERROR")) + } yield success + + if (c.docker.map(_.repository).nonEmpty) + for { + git <- ZIO.succeed(allRepositories.get()((project, c.docker.get.repository.get))) + dockerFile = new JFile(git.getRepository.getDirectory, c.docker.get.path.getOrElse("Dockerfile")) + success <- docker.buildImage(dockerFile, Set(date)).option.map(_.nonEmpty) + } yield success + + ZIO.succeed(false) + } + } yield successes + + + private def pipelineTriggers(project: Project): UIO[List[(Pipeline, List[Trigger])]] = + ZIO.succeed(project.pipelines.map(_.map(p => (p, p.start.triggers.getOrElse(List())))).getOrElse(List())) + + + private def eventSources(eventBusName: String, project: Project): UIO[List[EventSource]] = + for { + triggers <- pipelineTriggers(project).map(_.flatMap(_._2)) + eventSources <- ZIO.foreach(triggers) { trigger => + for { + eventSourceName <- ZIO.succeed(s"${name(project.title)}-${name(trigger.name)}-eventsource") + spec = EventSource.Spec( + calendar = trigger.calendar.map(c => Map(trigger.name -> c)).getOrElse(Map()), + eventBusName = eventBusName, + file = trigger.file.map(c => Map(trigger.name -> c)).getOrElse(Map()), + github = trigger.github.map(c => Map(trigger.name -> c)).getOrElse(Map()), + gitlab = trigger.gitlab.map(c => Map(trigger.name -> c)).getOrElse(Map()), + hdfs = trigger.hdfs.map(c => Map(trigger.name -> c)).getOrElse(Map()), + kafka = trigger.kafka.map(c => Map(trigger.name -> c)).getOrElse(Map()), + redis = trigger.redis.map(c => Map(trigger.name -> c)).getOrElse(Map()), + resource = trigger.resource.map(c => Map(trigger.name -> c)).getOrElse(Map()), + slack = trigger.slack.map(c => Map(trigger.name -> c)).getOrElse(Map()), + sns = trigger.sns.map(c => Map(trigger.name -> c)).getOrElse(Map()), + sqs = trigger.sqs.map(c => Map(trigger.name -> c)).getOrElse(Map()), + stripe = 
trigger.stripe.map(c => Map(trigger.name -> c)).getOrElse(Map()), + webhook = trigger.webhook.map(c => Map(trigger.name -> c)).getOrElse(Map()) + ) + eventSource = EventSource(eventSourceName, spec) + } yield eventSource + } + } yield eventSources + + + private def rollouts(project: Project): UIO[List[Rollout]] = + project.daemons match { + case Some(daemons) => ZIO.foreach(daemons) { daemon => + for { + strategy <- ZIO.succeed(daemon.strategy.map(s => Strategy(s.blueGreen.map(_.into[BlueGreen].transform), s.canary.map(_.into[Canary].transform)))) + containers <- ZIO.foreach(daemon.containers)(argoContainer(project, _)) + spec = Rollout.Spec(minReadySeconds = daemon.minReadySeconds, replicas = daemon.replicas, revisionHistoryLimit = daemon.revisionHistoryLimit, strategy = strategy) + rollout = Rollout(daemon.name, spec) + } yield rollout + } + case None => ZIO.succeed(List.empty) + } + + + private def sensors(project: Project): UIO[List[Sensor]] = + for { + pipelineTriggers <- pipelineTriggers(project).map(pt => pt.filter(_._2.isEmpty)) + argoContainer = ArgoContainer( + name = "sensor", + image = "argoproj/sensor:v0.13.0", + imagePullPolicy = Some("Always") + ) + template = ArgoTemplate(container = Some(argoContainer)) + sensors <- ZIO.foreach(pipelineTriggers){ pipelineTrigger => + for { + prefix <- ZIO.succeed(s"${name(project.title)}-${name(pipelineTrigger._1.name)}") + dependencies = pipelineTrigger._2.map(t => EventDependency(s"${name(t.name)}-gateway", s"${name(project.title)}-${name(t.name)}-gateway", "example")) + subscription = Subscription(Some(Http(9300))) + metadata = ObjectMetadata(generateName = Some(s"$prefix-workflow-")) + workflow <- argoWorkflow(project, pipelineTrigger._1) + k8sResource = K8SResource("argoproj.io/v1alpha1", "Workflow", metadata, workflow) + k8sTrigger = K8STrigger("argoproj.io", "v1alpha1", "workflows", "create", K8SSource(k8sResource)) + triggers = List(Sensor.Trigger(template = TriggerTemplate(s"$prefix-workflow", k8s = Some(k8sTrigger)))) + sensor = Sensor(s"$prefix-workflow", Sensor.Spec(Some(template), dependencies, subscription = Some(subscription), triggers = triggers)) + } yield sensor + } + } yield sensors + + + private def argoWorkflow(project: Project, pipeline: Pipeline): UIO[Workflow.Spec] = + for { + entrypoint <- ZIO.succeed(pipeline.start.action) + containers <- ZIO.foreach(pipeline.actions.map(_.container))(argoContainer(project, _)) + templates = containers.map(c => Template(container = Some(c), name = c.name)) + dagTasks = pipeline.actions.map(a => DAGTask(name = Some(a.name), dependencies = a.dependencies.getOrElse(List()), template = Some(a.name))) + dagTemplate = Template(name = pipeline.name, dag = Some(DAG(tasks = dagTasks))) + workflow = Workflow.Spec(entrypoint = Some(entrypoint), templates = templates :+ dagTemplate) + } yield workflow + + + private def name(title: String) = + title.toLowerCase.replaceAll("\\s", "-") + + + private def argoContainer(project: Project, container: Container): UIO[ArgoContainer] = + for { + globalContainer <- ZIO.succeed(project.containers.filter(_.name.equals(container.name)).head) + args = container.arguments.getOrElse(globalContainer.arguments.getOrElse(List())) + command = container.command.getOrElse(globalContainer.command.getOrElse(List())) + environmentVariables = container.environmentVariables.map(_ ++ globalContainer.environmentVariables.getOrElse(List())) + envs = environmentVariables.getOrElse(List()).map(e => EnvironmentVariable(e.name, e.value)) + image = 
globalContainer.docker.flatMap(_.image).getOrElse(s"${globalContainer.name}:latest") + imagePullPolicy = container.imagePullPolicy.orElse(globalContainer.imagePullPolicy) + resources = Resources(container.resources.orElse(globalContainer.resources).map(_.into[Requests].transform)) + volumeMounts = container.volumeMounts.getOrElse(globalContainer.volumeMounts.getOrElse(List())).map(_.into[VolumeMount].transform) + argoContainer = ArgoContainer(args, command, envs, image, imagePullPolicy, None, container.name, Some(resources), volumeMounts) + } yield argoContainer + + + private def projectDeployed(eventBusName: String, namespace: String, project: Project): Task[Boolean] = + for { + client <- kubernetes.newClient + e <- customResourcesDeployed[EventSource](client, namespace, eventSources(eventBusName, project))(kubernetes) + r <- customResourcesDeployed[Rollout](client, namespace, rollouts(project))(kubernetes) + s <- customResourcesDeployed[Sensor](client, namespace, sensors(project))(kubernetes) + deployed = e && r && s + _ <- kubernetes.close(client) + } yield deployed + + + private def deployProject(eventBusName: String, namespace: String, project: Project): Task[Boolean] = + for { + _ <- logger.debug(s"Deploying project: ${project.title} to namespace: $namespace") + client <- kubernetes.newClient + e <- deployCustomResources[EventSource](client, namespace, eventSources(eventBusName, project))(kubernetes) + r <- deployCustomResources[Rollout](client, namespace, rollouts(project))(kubernetes) + s <- deployCustomResources[Sensor](client, namespace, sensors(project))(kubernetes) + deployed = e && r && s + _ <- kubernetes.close(client) + } yield deployed + + + private def undeployProject(eventBusName: String, namespace: String, project: Project): Task[Boolean] = + for { + _ <- logger.debug(s"Un-deploying project: ${project.title} from namespace: $namespace") + client <- kubernetes.newClient + e <- undeployCustomResources[EventSource](client, namespace, eventSources(eventBusName, project))(kubernetes) + r <- undeployCustomResources[Rollout](client, namespace, rollouts(project))(kubernetes) + s <- undeployCustomResources[Sensor](client, namespace, sensors(project))(kubernetes) + undeployed = e && r && s + _ <- kubernetes.close(client) + } yield undeployed +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/Projects.scala b/jvm/src/main/scala/com/harana/modules/projects/Projects.scala new file mode 100644 index 0000000..0160807 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/Projects.scala @@ -0,0 +1,13 @@ +package com.harana.modules.projects + +import zio.Task +import zio.macros.accessible + +@accessible +trait Projects { + def setup(namespace: String): Task[Unit] + + def startMonitoring(namespace: String): Task[Unit] + + def stopMonitoring(namespace: String): Task[Unit] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Action.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Action.scala new file mode 100644 index 0000000..85b955d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Action.scala @@ -0,0 +1,10 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Action(name: String, + container: Container, + parameters: Option[List[Parameter]], + dependencies: Option[List[String]], + withItems: Option[List[String]]) \ No newline at end of file diff --git 
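A rough wiring sketch for the Projects module, assuming Live* layers exist for each dependency (Buildpack, Docker, Git, Kubernetes, Vertx and the core Config/Logger/Micrometer); the namespace is illustrative:

```
import com.harana.modules.projects.Projects
import zio._

// Bootstrap the module, then poll the projects Git repository every 5 seconds.
val monitorProjects: ZIO[Projects, Throwable, Unit] =
  for {
    projects <- ZIO.service[Projects]
    _        <- projects.setup("harana-projects")
    _        <- projects.startMonitoring("harana-projects")
  } yield ()

// e.g. monitorProjects.provide(LiveProjects.layer /*, dependency layers */)
```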
a/jvm/src/main/scala/com/harana/modules/projects/models/Artifactory.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Artifactory.scala new file mode 100644 index 0000000..d584a33 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Artifactory.scala @@ -0,0 +1,9 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Artifactory(url: String, + path: Option[String], + username: Option[String], + password: Option[String]) diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Author.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Author.scala new file mode 100644 index 0000000..5b5b122 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Author.scala @@ -0,0 +1,7 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Author(name: String, + email: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Auto.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Auto.scala new file mode 100644 index 0000000..0ebbf1e --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Auto.scala @@ -0,0 +1,8 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Auto(repository: Option[String], + path: Option[String], + builder: Option[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/BlueGreen.scala b/jvm/src/main/scala/com/harana/modules/projects/models/BlueGreen.scala new file mode 100644 index 0000000..c683dda --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/BlueGreen.scala @@ -0,0 +1,12 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class BlueGreen(activeService: String, + previewService: String, + previewReplicaCount: Option[Int], + autoPromotionEnabled: Option[Boolean], + autoPromotionSeconds: Option[Int], + scaleDownDelaySeconds: Option[Int], + scaleDownDelayRevisionLimit: Option[Int]) diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Canary.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Canary.scala new file mode 100644 index 0000000..ddf6ad8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Canary.scala @@ -0,0 +1,10 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Canary(stableService: String, + canaryService: String, + steps: List[CanaryStep] = List(), + maxSurge: Option[String] = None, + maxUnavailable: Option[String] = None) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/CanaryStep.scala b/jvm/src/main/scala/com/harana/modules/projects/models/CanaryStep.scala new file mode 100644 index 0000000..4f5f565 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/CanaryStep.scala @@ -0,0 +1,7 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class CanaryStep(weight: Int, + pause: Option[Int]) diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Container.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Container.scala new file mode 100644 index 0000000..701cf87 --- /dev/null +++ 
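For reference, an illustrative canary rollout built from the models above; service names, weights and the pause value are placeholders:

```
import com.harana.designer.backend.modules.projects.models.{Canary, CanaryStep}

// Shift traffic in two steps, pausing after the first increment.
val canaryRollout = Canary(
  stableService = "api-stable",
  canaryService = "api-canary",
  steps = List(
    CanaryStep(weight = 20, pause = Some(60)),
    CanaryStep(weight = 100, pause = None)
  )
)
```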
b/jvm/src/main/scala/com/harana/modules/projects/models/Container.scala @@ -0,0 +1,18 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Container(name: String, + arguments: Option[List[String]], + auto: Option[Auto], + command: Option[List[String]], + docker: Option[Docker], + environmentVariables: Option[List[EnvironmentVariable]], + imagePullPolicy: Option[String], + ports: Option[List[Port]], + python: Option[Python], + resources: Option[Resources], + scala: Option[Scala], + version: Option[String], + volumeMounts: Option[List[VolumeMount]]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Daemon.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Daemon.scala new file mode 100644 index 0000000..9523d55 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Daemon.scala @@ -0,0 +1,12 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Daemon(name: String, + minReadySeconds: Option[Int], + replicas: Option[Int], + revisionHistoryLimit: Option[Int], + containers: List[Container], + start: Option[DaemonStart], + strategy: Option[Strategy]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/DaemonStart.scala b/jvm/src/main/scala/com/harana/modules/projects/models/DaemonStart.scala new file mode 100644 index 0000000..3184998 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/DaemonStart.scala @@ -0,0 +1,6 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class DaemonStart(triggers: Option[List[Trigger]]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Docker.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Docker.scala new file mode 100644 index 0000000..fbb0230 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Docker.scala @@ -0,0 +1,8 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Docker(image: Option[String], + repository: Option[String], + path: Option[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Email.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Email.scala new file mode 100644 index 0000000..e8fd6a8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Email.scala @@ -0,0 +1,6 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Email(address: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/EnvironmentVariable.scala b/jvm/src/main/scala/com/harana/modules/projects/models/EnvironmentVariable.scala new file mode 100644 index 0000000..2d322e3 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/EnvironmentVariable.scala @@ -0,0 +1,6 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class EnvironmentVariable(name: String, value: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Git.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Git.scala new file mode 100644 index 0000000..ddab28f --- 
/dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Git.scala @@ -0,0 +1,13 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Git(url: String, + path: Option[String], + branch: Option[String], + tag: Option[String], + commit: Option[String], + username: Option[String], + password: Option[String], + oauthToken: Option[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Notification.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Notification.scala new file mode 100644 index 0000000..9d0aa03 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Notification.scala @@ -0,0 +1,8 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Notification(name: String, + email: Option[Email], + slack: Option[NotificationSlack]) diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/NotificationSlack.scala b/jvm/src/main/scala/com/harana/modules/projects/models/NotificationSlack.scala new file mode 100644 index 0000000..5288d7f --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/NotificationSlack.scala @@ -0,0 +1,6 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class NotificationSlack(channel: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Parameter.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Parameter.scala new file mode 100644 index 0000000..fdfe4eb --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Parameter.scala @@ -0,0 +1,6 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Parameter(name: String, value: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Pipeline.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Pipeline.scala new file mode 100644 index 0000000..5d3e96c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Pipeline.scala @@ -0,0 +1,8 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Pipeline(name: String, + start: PipelineStart, + actions: List[Action]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/PipelineStart.scala b/jvm/src/main/scala/com/harana/modules/projects/models/PipelineStart.scala new file mode 100644 index 0000000..c82825b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/PipelineStart.scala @@ -0,0 +1,7 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class PipelineStart(action: String, + triggers: Option[List[Trigger]]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Port.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Port.scala new file mode 100644 index 0000000..37a529c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Port.scala @@ -0,0 +1,8 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Port(name: String, + internal: Option[Int], + external: Option[Int]) \ No newline at end of 
file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Project.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Project.scala new file mode 100644 index 0000000..b60357b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Project.scala @@ -0,0 +1,14 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Project(title: String, + description: String, + author: Author, + parameters: Option[List[Parameter]], + repositories: Option[List[Repository]], + containers: List[Container], + pipelines: Option[List[Pipeline]], + daemons: Option[List[Daemon]], + notifications: Option[List[Notification]]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Python.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Python.scala new file mode 100644 index 0000000..17d6fa1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Python.scala @@ -0,0 +1,9 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Python(repository: Option[String], + path: Option[String], + file: Option[String], + baseImage: Option[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Repository.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Repository.scala new file mode 100644 index 0000000..1e9be50 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Repository.scala @@ -0,0 +1,7 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Repository(name: String, + git: Git) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Resources.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Resources.scala new file mode 100644 index 0000000..4c98ead --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Resources.scala @@ -0,0 +1,7 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Resources(cpu: Option[String], + memory: Option[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/SBT.scala b/jvm/src/main/scala/com/harana/modules/projects/models/SBT.scala new file mode 100644 index 0000000..8771c54 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/SBT.scala @@ -0,0 +1,6 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class SBT(memory: Option[String]) diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Scala.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Scala.scala new file mode 100644 index 0000000..2484c92 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Scala.scala @@ -0,0 +1,9 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Scala(repository: Option[String], + path: Option[String], + baseImage: Option[String], + sbt: Option[SBT]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Strategy.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Strategy.scala new file mode 100644 index 0000000..ca01e68 --- /dev/null +++ 
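The YAML files picked up by LiveProjects.findProjects decode into the Project model above via circe-yaml. A minimal sketch of that round trip with illustrative values; it assumes the derived decoders treat absent optional keys as None (circe's default behaviour for Option fields):

```
import com.harana.designer.backend.modules.projects.models.Project
import io.circe.yaml.parser

// A minimal project definition in the shape parseProject expects;
// optional sections (pipelines, daemons, notifications, ...) are omitted.
val projectYaml =
  """title: Demo Project
    |description: Example project definition
    |author:
    |  name: Jane Doe
    |  email: jane@example.com
    |containers:
    |  - name: web
    |    docker:
    |      image: nginx:latest
    |""".stripMargin

val decoded: Either[io.circe.Error, Project] =
  parser.parse(projectYaml).flatMap(_.as[Project])
```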
b/jvm/src/main/scala/com/harana/modules/projects/models/Strategy.scala @@ -0,0 +1,7 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Strategy(blueGreen: Option[BlueGreen], + canary: Option[Canary]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Trigger.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Trigger.scala new file mode 100644 index 0000000..a179778 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Trigger.scala @@ -0,0 +1,20 @@ +package com.harana.designer.backend.modules.projects.models + +import com.harana.modules.argo.events.EventSource._ +import io.circe.generic.JsonCodec + +@JsonCodec +case class Trigger(name: String, + calendar: Option[Calendar] = None, + file: Option[File] = None, + github: Option[Github] = None, + gitlab: Option[Gitlab] = None, + hdfs: Option[Hdfs] = None, + kafka: Option[Kafka] = None, + redis: Option[Redis] = None, + resource: Option[Resource] = None, + slack: Option[Slack] = None, + sns: Option[SNS] = None, + sqs: Option[SQS] = None, + stripe: Option[Stripe] = None, + webhook: Option[Webhook] = None) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/VolumeMount.scala b/jvm/src/main/scala/com/harana/modules/projects/models/VolumeMount.scala new file mode 100644 index 0000000..7b057f6 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/VolumeMount.scala @@ -0,0 +1,11 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class VolumeMount(mountPath: Option[String] = None, + mountPropagation: Option[String] = None, + name: String, + readOnly: Option[Boolean] = None, + subPath: Option[String] = None, + subPathExpr: Option[String] = None) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/package.scala b/jvm/src/main/scala/com/harana/modules/projects/package.scala new file mode 100644 index 0000000..1dc6d25 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/package.scala @@ -0,0 +1,53 @@ +package com.harana.modules + +import com.github.dockerjava.api.model.AuthConfig +import com.harana.modules.kubernetes.Kubernetes +import play.api.libs.json.Format +import skuber.api.client.{KubernetesClient, LoggingContext} +import skuber.{ObjectResource, ResourceDefinition} +import zio.{Task, UIO, ZIO} + +package object projects { + + def dockerAuthConfig(username: Option[String], password: Option[String], identityToken: Option[String], registryToken: Option[String]): Option[AuthConfig] = + (username, password, identityToken, registryToken) match { + case (Some(u), Some(p), _, _ ) => Some(new AuthConfig().withUsername(u).withPassword(p)) + case (_, _, Some(it), _ ) => Some(new AuthConfig().withIdentityToken(it)) + case (_, _, _, Some(rt) ) => Some(new AuthConfig().withRegistrytoken(rt)) + case (_, _, _, _) => None + } + + + def customResourcesDeployed[A <: ObjectResource](client: KubernetesClient, + namespace: String, + customResources: UIO[List[A]])(kubernetes: Kubernetes) + (implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext): Task[Boolean] = + for { + cr <- customResources + deployed <- ZIO.foreach(cr) { cr => + kubernetes.get[A](client, namespace, cr.name).map(o => cr.metadata.resourceVersion.equals(o.get.metadata.resourceVersion)).either + } + } yield deployed.exists(_.isLeft) + + + + def 
deployCustomResources[A <: ObjectResource](client: KubernetesClient, + namespace: String, + customResources: UIO[List[A]])(kubernetes: Kubernetes) + (implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext): Task[Boolean] = + for { + cr <- customResources + deployed <- ZIO.foreach(cr)(cr => kubernetes.create[A](client, namespace, cr).option) + } yield deployed.exists(_.nonEmpty) + + + + def undeployCustomResources[A <: ObjectResource](client: KubernetesClient, + namespace: String, + customResources: UIO[List[A]])(kubernetes: Kubernetes) + (implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext): Task[Boolean] = + for { + cr <- customResources + deployed <- ZIO.foreach(cr)(cr => kubernetes.delete[A](client, namespace, cr.name).option) + } yield deployed.exists(_.nonEmpty) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/salesforce/LiveSalesforce.scala b/jvm/src/main/scala/com/harana/modules/salesforce/LiveSalesforce.scala new file mode 100644 index 0000000..813acb0 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/salesforce/LiveSalesforce.scala @@ -0,0 +1,105 @@ +package com.harana.modules.salesforce + +import com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.salesforce.models.{SalesforceError, SalesforceQuota} +import io.circe.Json +import io.circe.optics.JsonPath +import zio.{IO, ZIO, ZLayer} + +object LiveSalesforce { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + http <- ZIO.service[Http] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveSalesforce(config, http, logger, micrometer) + } +} + +case class LiveSalesforce(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends Salesforce { + + private val loginTokenUrl = "https://login.salesforce.com/services/oauth2/token" + + private val accessToken = + for { + username <- config.secret("salesforce-username") + password <- config.secret("salesforce-password") + clientId <- config.secret("salesforce-client-id") + clientSecret <- config.secret("salesforce-client-secret") + securityToken <- config.secret("salesforce-security-token") + grantType <- config.string("salesforce.grantType") + response <- http.postAsJson(loginTokenUrl, params = Map( + "grant_type" -> List(grantType), + "client_id" -> List(clientId), + "client_secret" -> List(clientSecret), + "username" -> List(username), + "password" -> List(s"$password$securityToken") + )).mapBoth(SalesforceError.ConnectionError, JsonPath.root.access_token.string.getOption) + } yield response + + + def quota: IO[SalesforceError, SalesforceQuota] = + null +// for { +// baseUrl <- config.string("salesforce.baseUrl") +// apiVersion <- config.int("salesforce.apiVersion") +// json <- get(s"$baseUrl/services/data/v$apiVersion/limits") +// response <- (json \ "DailyApiRequests").toOption match { +// case Some(value) => +// val max = (value \ "Max").toString.toFloat +// val remaining = (value \ "Remaining").toString.toFloat +// val used = max - remaining +// val percent: Float = (used / max) * 100 +// IO.succeed(SalesforceQuota(used.toInt, remaining.toInt, percent.toInt)) +// +// case None => +// IO.fail(SalesforceError.ParseError) +// } +// } yield response + + + def describeObject(name: String): IO[SalesforceError, Json] = + for { + baseUrl <- config.string("salesforce.baseUrl") + apiVersion <- 
config.int("salesforce.apiVersion") + json <- get(s"$baseUrl/services/data/v$apiVersion/sobjects/$name/describe") + } yield json + + + def objectList: IO[SalesforceError, Json] = + null +// for { +// baseUrl <- config.string("salesforce.baseUrl") +// apiVersion <- config.int("salesforce.apiVersion") +// json <- get(s"$baseUrl/services/data/v$apiVersion/sobjects") +// response <- (json \ "sobjects") match { +// case JArray(x) => IO.succeed(x) +// case _ => IO.fail(SalesforceError.ParseError) +// } +// } yield response + + + def objectNames: IO[SalesforceError, List[String]] = { + null + } +// objectList.map(_.map { +// item => (item \ "name").toString +// }) +// } + + + private def get(url: String, query: Option[String] = None): IO[SalesforceError, Json] = { + accessToken.flatMap { token => + val headers = Map( + "Authorization" -> s"Bearer $token", + "Content-Type" -> "application/json" + ) + val params = query.map { q => Map("q" -> List(q)) }.getOrElse(Map()) + http.getAsJson(url, params, headers).mapError(SalesforceError.ConnectionError) + } + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/salesforce/Salesforce.scala b/jvm/src/main/scala/com/harana/modules/salesforce/Salesforce.scala new file mode 100644 index 0000000..4904067 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/salesforce/Salesforce.scala @@ -0,0 +1,17 @@ +package com.harana.modules.salesforce + +import com.harana.modules.salesforce.models.{SalesforceError, SalesforceQuota} +import io.circe.Json +import zio.IO +import zio.macros.accessible + +@accessible +trait Salesforce { + def quota: IO[SalesforceError, SalesforceQuota] + + def describeObject(name: String): IO[SalesforceError, Json] + + def objectList: IO[SalesforceError, Json] + + def objectNames: IO[SalesforceError, List[String]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/salesforce/models/SalesforceError.scala b/jvm/src/main/scala/com/harana/modules/salesforce/models/SalesforceError.scala new file mode 100644 index 0000000..b27dfa8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/salesforce/models/SalesforceError.scala @@ -0,0 +1,9 @@ +package com.harana.modules.salesforce.models + +import com.harana.modules.core.http.models.OkHttpError + +sealed trait SalesforceError +object SalesforceError { + case object ParseError extends SalesforceError + case class ConnectionError(err: OkHttpError) extends SalesforceError +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/salesforce/models/SalesforceQuota.scala b/jvm/src/main/scala/com/harana/modules/salesforce/models/SalesforceQuota.scala new file mode 100644 index 0000000..20b8268 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/salesforce/models/SalesforceQuota.scala @@ -0,0 +1,5 @@ +package com.harana.modules.salesforce.models + +case class SalesforceQuota(used: Int, + remaining: Int, + percentage: Int) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/segment/LiveSegment.scala b/jvm/src/main/scala/com/harana/modules/segment/LiveSegment.scala new file mode 100644 index 0000000..57af1b9 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/segment/LiveSegment.scala @@ -0,0 +1,70 @@ +package com.harana.modules.segment + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.segment.models.SegmentOptions +import com.segment.analytics.Analytics 
+import com.segment.analytics.messages._ +import zio.{IO, ZIO, ZLayer} + +import scala.jdk.CollectionConverters._ + +object LiveSegment { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveSegment(config, logger, micrometer) + } +} + +case class LiveSegment(config: Config, logger: Logger, micrometer: Micrometer) extends Segment { + + private val analytics = config.secret("segment-write-key").map(c => Analytics.builder(c).build) + + def alias(previousId: String, userId: String, options: SegmentOptions): IO[Nothing, Unit] = { + val builder = AliasMessage.builder(previousId) + sendMessage(builder, userId, options) + } + + def group(userId: String, groupId: String, traits: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] = { + val builder = GroupMessage.builder(groupId).traits(traits.asJava) + sendMessage(builder, userId, options) + } + + def identify(userId: String, traits: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] = { + val builder = IdentifyMessage.builder().traits(traits.asJava) + sendMessage(builder, userId, options) + } + + def page(userId: String, name: String, properties: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] = { + val builder = PageMessage.builder(name).properties(properties.asJava) + sendMessage(builder, userId, options) + } + + def screen(userId: String, name: String, properties: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] = { + val builder = ScreenMessage.builder(name).properties(properties.asJava) + sendMessage(builder, userId, options) + } + + def track(userId: String, event: String, properties: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] = { + val builder = TrackMessage.builder(event).properties(properties.asJava) + sendMessage(builder, userId, options) + } + + def flush: IO[Nothing, Unit] = + analytics.map(_.flush()) + + private def sendMessage(builder: MessageBuilder[_ <: Message, _ <: MessageBuilder[_ <: Message, _ <: AnyRef]], + userId: String, + options: SegmentOptions) = { + if (options.isAnonymous) builder.anonymousId(userId) else builder.userId(userId) + if (options.timestamp.nonEmpty) builder.timestamp(options.timestamp.get) + //if (options.integrationOptions.nonEmpty) builder.integrationOptions(options.integrationOptions.get._1, options.integrationOptions.get._2.asJava) + builder.context(options.context.asJava) + options.enabledIntegrations.foreach { i => builder.enableIntegration(i._1, i._2) } + analytics.map(_.enqueue(builder)) + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/segment/Segment.scala b/jvm/src/main/scala/com/harana/modules/segment/Segment.scala new file mode 100644 index 0000000..2acc7b2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/segment/Segment.scala @@ -0,0 +1,22 @@ +package com.harana.modules.segment + +import com.harana.modules.segment.models.SegmentOptions +import zio.IO +import zio.macros.accessible + +@accessible +trait Segment { + def alias(previousId: String, userId: String, options: SegmentOptions): IO[Nothing, Unit] + + def group(userId: String, groupId: String, traits: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] + + def identify(userId: String, traits: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] + + def page(userId: String, name: String, properties: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] + + def screen(userId: String, name: String, properties: Map[String, _], 
options: SegmentOptions): IO[Nothing, Unit] + + def track(userId: String, event: String, properties: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] + + def flush: IO[Nothing, Unit] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/segment/models/SegmentOptions.scala b/jvm/src/main/scala/com/harana/modules/segment/models/SegmentOptions.scala new file mode 100644 index 0000000..d168e88 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/segment/models/SegmentOptions.scala @@ -0,0 +1,10 @@ +package com.harana.modules.segment.models + +import java.util.Date + +case class SegmentOptions(context: Map[String, _] = Map(), + enabledIntegrations: Map[String, Boolean] = Map(), + integrationOptions: Option[(String, Map[String, _ <: Object])] = None, + isAnonymous: Boolean = false, + messageId: Option[String] = None, + timestamp: Option[Date] = None){} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/sentry/LiveSentry.scala b/jvm/src/main/scala/com/harana/modules/sentry/LiveSentry.scala new file mode 100644 index 0000000..1aa98c1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/sentry/LiveSentry.scala @@ -0,0 +1,32 @@ +package com.harana.modules.sentry + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import io.sentry.{Breadcrumb, Sentry => ioSentry} +import zio.{UIO, ZIO, ZLayer} + +object LiveSentry { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveSentry(config, logger, micrometer) + } +} + +case class LiveSentry(config: Config, logger: Logger, micrometer: Micrometer) extends Sentry { + + // FIXME + config.string("sentry.dsn", "").map(ioSentry.init) + + def addBreadcrumb(message: String): UIO[Unit] = { + ZIO.succeed(ioSentry.addBreadcrumb(message)) + } + + def addBreadcrumb(breadcrumb: Breadcrumb): UIO[Unit] = { + ZIO.succeed(ioSentry.addBreadcrumb(breadcrumb)) + } + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/sentry/Sentry.scala b/jvm/src/main/scala/com/harana/modules/sentry/Sentry.scala new file mode 100644 index 0000000..c8a33d2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/sentry/Sentry.scala @@ -0,0 +1,11 @@ +package com.harana.modules.sentry + +import io.sentry.Breadcrumb +import zio.UIO +import zio.macros.accessible + +@accessible +trait Sentry { + def addBreadcrumb(message: String): UIO[Unit] + def addBreadcrumb(breadcrumb: Breadcrumb): UIO[Unit] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/LiveShopify.scala b/jvm/src/main/scala/com/harana/modules/shopify/LiveShopify.scala new file mode 100644 index 0000000..944a0d2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/LiveShopify.scala @@ -0,0 +1,304 @@ +package com.harana.modules.shopify + +import com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.shopify.models._ +import io.circe.parser._ +import io.circe.syntax.EncoderOps +import io.circe.{Decoder, Encoder} +import purecsv.safe._ +import zio.{Ref, Task, ZIO, ZLayer} + +import java.io.File +import java.time.ZoneId +import java.time.temporal.{ChronoUnit, TemporalAdjusters} +import scala.util.Try + +object LiveShopify { + val layer = ZLayer { + for 
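The Segment module mirrors the standard Segment calls (alias/group/identify/page/screen/track) and enqueues messages on the underlying Analytics client; `flush` forces delivery. A minimal sketch of emitting a `track` event through the generated accessor, assuming `LiveSegment.layer` is provided elsewhere and using placeholder ids and properties:

```scala
import com.harana.modules.segment.Segment
import com.harana.modules.segment.models.SegmentOptions
import zio.ZIO

object SegmentExample {
  // Arguments are: userId, event name, properties, options (all placeholders).
  val trackSignup: ZIO[Segment, Nothing, Unit] =
    Segment.track(
      "user-123",
      "Signed Up",
      Map("plan" -> "free"),
      SegmentOptions(context = Map("ip" -> "0.0.0.0"))
    )
}
```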
{ + config <- ZIO.service[Config] + http <- ZIO.service[Http] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveShopify(config, http, logger, micrometer) + } +} + +case class LiveShopify(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends Shopify { + + def forecastInventory(implicit connection: ShopifyConnection): Task[List[Output]] = + for { + products <- products(limit = Some(250), status = Some("active")).map(_.items) + _ <- logger.info(s"Number of products: ${products.size}") + productMap = products.map(p => p.id -> p).toMap + variants = products.flatMap(p => p.variants) + variantsMap = variants.map(v => v.id -> v).toMap + _ <- logger.info(s"Number of variants: ${variantsMap.size}") + + outputs = variants.map(v => Output(productMap(v.productId).title, v.title, v.sku.getOrElse(""), v.id, v.option1.getOrElse(""), v.option2.getOrElse(""), v.option3.getOrElse(""), "0", "0", "0", "0", "0")) + orders <- all(orders(limit = Some(250), status = Some("any"))) + _ <- logger.info(s"Number of orders: ${orders.size}") + + ordersByDate = orders.groupBy(o => o.createdAt.atZone(ZoneId.systemDefault()).`with`(TemporalAdjusters.firstDayOfMonth()).truncatedTo(ChronoUnit.DAYS)) + lineItemsByDate = ordersByDate.view.mapValues(orders => orders.flatMap(_.lineItems.map(li => (lineItemTitle(li), li.quantity)))) + lineItemsByVariantIdMap = orders.flatMap(_.lineItems.map(li => li.variantId -> li)).toMap + groupedLineItemsByDate = lineItemsByDate.mapValues(lineItems => lineItems.groupBy(_._1).view.mapValues(_.map(_._2).sum).toList.sortBy(_._1)) + + groupedLineItemsByDateMap = groupedLineItemsByDate.mapValues(sumByKeys).mapValues(_.toMap) + sortedDates = groupedLineItemsByDate.keys.toList.sortBy(_.toString).take(3) + _ <- logger.info(s"Dates to output: ${sortedDates.map(_.toString)}") + + middleOutputs = outputs.map { o => + if (lineItemsByVariantIdMap.contains(o.variantId)) { + val lineItem = lineItemsByVariantIdMap(o.variantId) + + val month1 = Try(groupedLineItemsByDateMap(sortedDates.head)(lineItemTitle(lineItem))).toOption.getOrElse(0L) + val month2 = Try(groupedLineItemsByDateMap(sortedDates(1))(lineItemTitle(lineItem))).toOption.getOrElse(0L) + val month3 = Try(groupedLineItemsByDateMap(sortedDates(2))(lineItemTitle(lineItem))).toOption.getOrElse(0L) + val total = month1 + month2 + month3 + o.copy(month1Sales = month1.toString, month2Sales = month2.toString, month3Sales = month3.toString, totalSales = total.toString) + } else { + o.copy(month1Sales = "-", month2Sales = "-", month3Sales = "-", totalSales = "-") + } + } + + location <- locations(connection).map(_.items.head) + _ <- logger.info(s"Found location with id: ${location.id}") + + inventoryLevels <- all(inventoryLevels(limit = Some(250), locationIds = List(location.id))) + inventoryLevelsMap = inventoryLevels.map(il => il.inventoryItemId -> il.available).toMap + finalOutputs = middleOutputs.map(o => o.copy(inventoryLevel = Try(inventoryLevelsMap(variantsMap(o.variantId).inventoryItemId).toString).toOption.getOrElse("-"))) + + _ = finalOutputs.writeCSVToFile(new File("/tmp/output.csv")) + + } yield finalOutputs + + + private def sumByKeys[A](tuples: List[(A, Long)]) : List[(A, Long)] = { + tuples.groupBy(_._1).view.mapValues(_.map(_._2).sum).toList + } + + + private def lineItemTitle(li: LineItem) = + s"${li.productId}-${li.variantId}" + + + def orders(ids: List[String] = List(), + limit: Option[Int] = None, + sinceId: Option[String] = None, + createdAtMin: Option[String] = None, + 
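The `forecastInventory` flow above buckets orders by month and then collapses duplicate product/variant keys with the private `sumByKeys` helper. A self-contained sketch of that helper's behaviour, using the same implementation and hypothetical sample data:

```scala
object SumByKeysExample {
  // Same logic as the private helper: group duplicate keys and sum their counts.
  def sumByKeys[A](tuples: List[(A, Long)]): List[(A, Long)] =
    tuples.groupBy(_._1).view.mapValues(_.map(_._2).sum).toList

  val monthlySales = sumByKeys(List(("42-7001", 2L), ("42-7001", 3L), ("43-7002", 1L)))
  // e.g. List(("43-7002", 1L), ("42-7001", 5L)) -- group order is unspecified
}
```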
createdAtMax: Option[String] = None, + updatedAtMin: Option[String] = None, + updatedAtMax: Option[String] = None, + processedAtMin: Option[String] = None, + processedAtMax: Option[String] = None, + attributionAppId: Option[String] = None, + status: Option[String] = None, + financialStatus: Option[String] = None, + fulfillment_status: Option[String] = None, + fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Page[Order]] = + getList[Order](s"orders", Map( + "ids" -> ids.mkString(","), + "limit" -> limit.getOrElse(50).toString, + "status" -> status.getOrElse("") + )) + + + def products(ids: List[String] = List(), + limit: Option[Int] = None, + sinceId: Option[String] = None, + title: Option[String] = None, + vendor: Option[String] = None, + handle: Option[String] = None, + productType: Option[String] = None, + status: Option[String] = None, + collectionId: Option[String] = None, + createdAtMin: Option[String] = None, + createdAtMax: Option[String] = None, + updatedAtMin: Option[String] = None, + updatedAtMax: Option[String] = None, + processedAtMin: Option[String] = None, + processedAtMax: Option[String] = None, + publishedStatus: Option[String] = None, + fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Page[models.Product]] = + getList[models.Product](s"products", Map( + "ids" -> ids.mkString(","), + "limit" -> limit.getOrElse(50).toString, + "status" -> status.getOrElse("")) + ) + + + def inventoryLevels(inventoryItemIds: List[Long] = List(), + locationIds: List[Long] = List(), + limit: Option[Int] = None, + updatedAtMin: Option[String] = None) + (implicit connection: ShopifyConnection): Task[Page[InventoryLevel]] = + getList[InventoryLevel](s"inventory_levels", Map( + "limit" -> limit.getOrElse(50).toString, + "location_ids" -> locationIds.map(_.toString).mkString(","))) + + + def locations(implicit connection: ShopifyConnection): Task[Page[Location]] = + getList[Location](s"locations", Map()) + + + def customer(id: String, + fields: List[String] = List()) + (implicit connection: ShopifyConnection): Task[Customer] = + get[Customer](s"customers/$id", Map()) + + + def createCustomerMetafield(customerId: String, metafield: Metafield)(implicit connection: ShopifyConnection): Task[Metafield] = + post[Metafield](s"customers/$customerId/metafields", Map(), metafield) + + + def customerMetafield(customerId: String, metafieldId: String)(implicit connection: ShopifyConnection): Task[Metafield] = + get[Metafield](s"customers/$customerId/metafields/$metafieldId.json", Map()) + + + def setCustomerMetafield(customerId: String, metafieldId: String, metafieldType: String, metafieldValue: String)(implicit connection: ShopifyConnection): Task[Unit] = + post[String](s"customers/$customerId/metafields/$metafieldId.json", Map(), + Map("metafield" -> Map("id" -> metafieldId, "value" -> metafieldValue, "type" -> metafieldType)).asJson.noSpaces + ).ignore + + def orderMetafield(orderId: String, metafieldId: String)(implicit connection: ShopifyConnection): Task[Metafield] = + get[Metafield](s"orders/$orderId/metafields/$metafieldId.json", Map()) + + + def setOrderMetafield(orderId: String, metafieldId: String, metafieldType: String, metafieldValue: String)(implicit connection: ShopifyConnection): Task[Unit] = + post[String](s"orders/$orderId/metafields/$metafieldId.json", Map(), + Map("metafield" -> Map("id" -> metafieldId, "value" -> metafieldValue, "type" -> metafieldType)).asJson.noSpaces + ).ignore + + + def product(id: String, + fields: List[String] = 
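The metafield setters above post a small JSON envelope keyed by `metafield`. A usage sketch for tagging a customer, assuming an implicit `ShopifyConnection`, a provided `Shopify` layer, and placeholder ids:

```scala
import com.harana.modules.shopify.Shopify
import com.harana.modules.shopify.models.ShopifyConnection
import zio.ZIO

object MetafieldExample {
  implicit val connection: ShopifyConnection =
    ShopifyConnection("my-shop", "api-key", "api-password")   // placeholder credentials

  val tagCustomer =
    ZIO.serviceWithZIO[Shopify](
      _.setCustomerMetafield(
        customerId     = "1234567890",
        metafieldId    = "9876543210",
        metafieldType  = "single_line_text_field",
        metafieldValue = "vip"
      )
    )
}
```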
List())(implicit connection: ShopifyConnection): Task[models.Product] = + get[models.Product](s"products/$id", Map()) + + + def productVariants(productId: Long, + limit: Option[Int] = None, + presentmentCurrencies: List[String] = List(), + sinceId: Option[String] = None, + fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Page[ProductVariant]] = + getList[ProductVariant](s"products/$productId/variants", Map( + "fields" -> fields.mkString(","), + "limit" -> limit.getOrElse(50).toString, + "presentment_currencies" -> presentmentCurrencies.mkString(","), + "since_id" -> sinceId.getOrElse(""))) + + + def uploadImage(sourceUrl: String)(implicit connection: ShopifyConnection): Task[Unit] = + for { + url <- ZIO.succeed(s"https://${connection.subdomain}.myshopify.com/admin/api/2023-07/graphql.json") + _ <- http.post( + url, + credentials = Some((connection.apiKey, connection.password)), + body = Some( + s"""{ + "query": "mutation fileCreate($$files: [FileCreateInput!]!) { fileCreate(files: $$files) { files { alt createdAt } } }", + "variables": { + "files": { + "contentType": "IMAGE", + "originalSource": "$sourceUrl" + } + } + }""" + ) + ).mapBoth(e => new Exception(e.toString), _.body().string()) + } yield () + + + def previousPage[T](page: Page[T])(implicit connection: ShopifyConnection, d: Decoder[T]): Task[Option[Page[T]]] = + ZIO.foreach(page.previousUrl)(url => getUrlList[T](url, Map())) + + + def nextPage[T](page: Page[T])(implicit connection: ShopifyConnection, d: Decoder[T]): Task[Option[Page[T]]] = + ZIO.foreach(page.nextUrl)(url => getUrlList[T](url, Map())) + + + def all[T](fn: => Task[Page[T]])(implicit connection: ShopifyConnection, d : Decoder[T]): Task[List[T]] = + for { + itemsRef <- Ref.make[List[T]](List()) + initialPage <- fn + currentPageRef <- Ref.make[Option[Page[T]]](Some(initialPage)) + _ <- (for { + currentPage <- currentPageRef.get + existingItems <- itemsRef.get + _ <- itemsRef.set (existingItems ++ currentPage.get.items) + nextPage <- ZIO.foreach(currentPage)(nextPage[T](_)).map(_.flatten) + _ <- currentPageRef.set(nextPage) + } yield ()).repeatWhileZIO { _ => currentPageRef.get.map(_.nonEmpty) } + items <- itemsRef.get + } yield items + + + private def get[T](endpoint: String, parameters: Map[String, String]) + (implicit connection: ShopifyConnection, d: Decoder[T]): Task[T] = + for { + url <- ZIO.succeed(s"https://${connection.subdomain}.myshopify.com/admin/api/2023-04/$endpoint.json") + response <- http.get(url, params = parameters.map{ case (k, v) => k -> List(v) }, credentials = Some((connection.apiKey, connection.password))).mapBoth(e => new Exception(e.toString), _.body().string()) + obj <- ZIO.fromEither(decode[T](response)) + } yield obj + + + private def getList[T](endpoint: String, parameters: Map[String, String]) + (implicit connection: ShopifyConnection, d: Decoder[T]): Task[Page[T]] = + for { + url <- ZIO.succeed(s"https://${connection.subdomain}.myshopify.com/admin/api/2023-04/$endpoint.json") + page <- getUrlList[T](url, parameters) + } yield page + + + private def getUrlList[T](url: String, parameters: Map[String, String]) + (implicit connection: ShopifyConnection, d: Decoder[T]): Task[Page[T]] = + for { + response <- http.get(url, params = parameters.map{ case (k, v) => k -> List(v) }, credentials = Some((connection.apiKey, connection.password))).mapError(e => new Exception(e.toString)) + + rel = Option(response.header("link")) + relUrl = rel.map(r => r.substring(1, r.indexOf(">"))) + relType = rel.map(r => 
r.substring(r.indexOf("rel=")+5, r.length-1)) + + cursor <- ZIO.attempt(parse(response.body().string).toOption.get.hcursor) + root <- ZIO.attempt(cursor.keys.get.head) + json <- ZIO.attempt(cursor.downField(root).focus.get) + items <- ZIO.fromEither(json.as[List[T]]).onError(e => logger.error(e.prettyPrint)) + + page = (rel, relType) match { + case (None, _) => Page(None, None, items) + case (Some(_), Some(rt)) if (rt == "previous") => Page(relUrl, None, items) + case (Some(_), Some(rt)) if (rt == "next") => Page(None, relUrl, items) + } + } yield page + + + private def post[T](endpoint: String, parameters: Map[String, String], body: T) + (implicit connection: ShopifyConnection, d: Decoder[T], e: Encoder[T]): Task[T] = + for { + url <- ZIO.succeed(s"https://${connection.subdomain}.myshopify.com/admin/api/2023-04/$endpoint.json") + response <- http.post( + url, + params = parameters.map { case (k, v) => k -> List(v) }, + credentials = Some((connection.apiKey, connection.password)), + body = Some(body.asJson.noSpaces) + ).mapBoth(e => new Exception(e.toString), _.body().string()) + obj <- ZIO.fromEither(decode[T](response)) + } yield obj + + + private def put[T](endpoint: String, parameters: Map[String, String], body: T) + (implicit connection: ShopifyConnection, d: Decoder[T], e: Encoder[T]): Task[T] = + for { + url <- ZIO.succeed(s"https://${connection.subdomain}.myshopify.com/admin/api/2023-04/$endpoint.json") + response <- http.put( + url, + params = parameters.map { case (k, v) => k -> List(v) }, + credentials = Some((connection.apiKey, connection.password)), + body = Some(body.asJson.noSpaces) + ).mapBoth(e => new Exception(e.toString), _.body().string()) + obj <- ZIO.fromEither(decode[T](response)) + } yield obj +} diff --git a/jvm/src/main/scala/com/harana/modules/shopify/Shopify.scala b/jvm/src/main/scala/com/harana/modules/shopify/Shopify.scala new file mode 100644 index 0000000..e035008 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/Shopify.scala @@ -0,0 +1,80 @@ +package com.harana.modules.shopify + +import com.harana.modules.shopify.models._ +import io.circe.Decoder +import zio.Task +import zio.macros.accessible + +@accessible +trait Shopify { + + def all[T](fn: => Task[Page[T]])(implicit connection: ShopifyConnection, d: Decoder[T]): Task[List[T]] + + def previousPage[T](page: Page[T])(implicit connection: ShopifyConnection, d: Decoder[T]): Task[Option[Page[T]]] + + def nextPage[T](page: Page[T])(implicit connection: ShopifyConnection, d: Decoder[T]): Task[Option[Page[T]]] + + def forecastInventory(implicit connection: ShopifyConnection): Task[List[Output]] + + def orders(ids: List[String] = List(), + limit: Option[Int] = None, + sinceId: Option[String] = None, + createdAtMin: Option[String] = None, + createdAtMax: Option[String] = None, + updatedAtMin: Option[String] = None, + updatedAtMax: Option[String] = None, + processedAtMin: Option[String] = None, + processedAtMax: Option[String] = None, + attributionAppId: Option[String] = None, + status: Option[String] = None, + financialStatus: Option[String] = None, + fulfillment_status: Option[String] = None, + fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Page[Order]] + + def products(ids: List[String] = List(), + limit: Option[Int] = None, + sinceId: Option[String] = None, + title: Option[String] = None, + vendor: Option[String] = None, + handle: Option[String] = None, + productType: Option[String] = None, + status: Option[String] = None, + collectionId: Option[String] = None, + 
createdAtMin: Option[String] = None, + createdAtMax: Option[String] = None, + updatedAtMin: Option[String] = None, + updatedAtMax: Option[String] = None, + processedAtMin: Option[String] = None, + processedAtMax: Option[String] = None, + publishedStatus: Option[String] = None, + fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Page[models.Product]] + + def inventoryLevels(inventoryItemIds: List[Long] = List(), + locationIds: List[Long] = List(), + limit: Option[Int] = None, + updatedAtMin: Option[String] = None)(implicit connection: ShopifyConnection): Task[Page[InventoryLevel]] + + def customer(id: String, fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Customer] + + def customerMetafield(customerId: String, metafieldId: String)(implicit connection: ShopifyConnection): Task[Metafield] + + def createCustomerMetafield(customerId: String, metafield: Metafield)(implicit connection: ShopifyConnection): Task[Metafield] + + def setCustomerMetafield(customerId: String, metafieldId: String, metafieldType: String, metafieldValue: String)(implicit connection: ShopifyConnection): Task[Unit] + + def orderMetafield(orderId: String, metafieldId: String)(implicit connection: ShopifyConnection): Task[Metafield] + + def setOrderMetafield(orderId: String, metafieldId: String, metafieldType: String, metafieldValue: String)(implicit connection: ShopifyConnection): Task[Unit] + + def locations(implicit connection: ShopifyConnection): Task[Page[Location]] + + def product(id: String, fields: List[String] = List())(implicit connection: ShopifyConnection): Task[models.Product] + + def productVariants(productId: Long, + limit: Option[Int] = None, + presentmentCurrencies: List[String] = List(), + sinceId: Option[String] = None, + fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Page[ProductVariant]] + + def uploadImage(sourceUrl: String)(implicit connection: ShopifyConnection): Task[Unit] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Address.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Address.scala new file mode 100644 index 0000000..567232d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Address.scala @@ -0,0 +1,17 @@ +package com.harana.modules.shopify.models + +case class Address(firstName: String, + lastName: String, + name: String, + company: Option[String], + address1: String, + address2: Option[String], + city: String, + zip: String, + province: String, + country: String, + provinceCode: String, + countryCode: String, + phone: Option[String], + latitude: Option[BigDecimal], + longitude: Option[BigDecimal]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Attribute.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Attribute.scala new file mode 100644 index 0000000..6cdad2c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Attribute.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class Attribute(name: String, + value: String) + diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Customer.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Customer.scala new file mode 100644 index 0000000..3da0df1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Customer.scala @@ -0,0 +1,18 @@ +package com.harana.modules.shopify.models + +import java.time.Instant + +case class Customer(id: Long, + email: 
String, + acceptsMarketing: Boolean, + createdAt: Instant, + updatedAt: Instant, + firstName: String, + lastName: String, + phone: Option[String], + ordersCount: Long, + state: String, + totalSpent: BigDecimal, + note: Option[String], + verifiedEmail: Option[Boolean], + addresses: Set[Address]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Fulfilment.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Fulfilment.scala new file mode 100644 index 0000000..59b22cc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Fulfilment.scala @@ -0,0 +1,15 @@ +package com.harana.modules.shopify.models + +import java.time.Instant + +case class Fulfilment(id: Long, + orderId: Long, + status: String, + createdAt: Instant, + updatedAt: Instant, + trackingCompany: Option[String], + trackingNumber: Option[String], + lineItems: List[LineItem], + trackingUrl: Option[String], + trackingUrls: List[String], + locationId: Option[Long]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Image.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Image.scala new file mode 100644 index 0000000..e97646a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Image.scala @@ -0,0 +1,8 @@ +package com.harana.modules.shopify.models + +case class Image(id: Long, + productId: Long, + name: Option[String], + position: Int, + source: Option[String], + variantIds: List[Long]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/InventoryLevel.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/InventoryLevel.scala new file mode 100644 index 0000000..5f4357d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/InventoryLevel.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class InventoryLevel(inventoryItemId: Long, + locationId: Option[Long], + available: Long) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/InventoryPolicy.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/InventoryPolicy.scala new file mode 100644 index 0000000..0a37aab --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/InventoryPolicy.scala @@ -0,0 +1,11 @@ +package com.harana.modules.shopify.models + +import enumeratum.values.{StringCirceEnum, StringEnum, StringEnumEntry} + +sealed abstract class InventoryPolicy(val value: String) extends StringEnumEntry + +object InventoryPolicy extends StringEnum[InventoryPolicy] with StringCirceEnum[InventoryPolicy] { + case object Continue extends InventoryPolicy("continue") + case object Deny extends InventoryPolicy("deny") + val values = findValues +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/LineItem.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/LineItem.scala new file mode 100644 index 0000000..b6149a2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/LineItem.scala @@ -0,0 +1,21 @@ +package com.harana.modules.shopify.models + +case class LineItem(id: Long, + variantId: Long, + title: String, + quantity: Long, + price: BigDecimal, + grams: Long, + sku: Option[String], + variantTitle: String, + vendor: String, + productId: Long, + requiresShipping: Boolean, + taxable: Boolean, + giftCard: Boolean, + name: String, + variantInventoryManagement: Option[String], + fulfillableQuantity: Long, + totalDiscount: BigDecimal, + 
fulfillmentStatus: Option[String], + fulfillmentService: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Location.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Location.scala new file mode 100644 index 0000000..70acb87 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Location.scala @@ -0,0 +1,14 @@ +package com.harana.modules.shopify.models + +case class Location(id: Long, + name: String, + address1: String, + address2: String, + city: String, + zip: String, + country: String, + phone: String, + province: String, + countryCode: String, + countryName: String, + provinceCode: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Metafield.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Metafield.scala new file mode 100644 index 0000000..0cb22ab --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Metafield.scala @@ -0,0 +1,15 @@ +package com.harana.modules.shopify.models + +import java.time.Instant + +case class Metafield(id: Long, + namespace: String, + key: String, + value: String, + description: Option[String], + owner_id: Long, + owner_resource: String, + created_at: Option[Instant], + updated_at: Option[Instant], + `type`: String, + admin_graphql_api_id: String) diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/MetafieldType.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/MetafieldType.scala new file mode 100644 index 0000000..c32194b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/MetafieldType.scala @@ -0,0 +1,53 @@ +package com.harana.modules.shopify.models + +import enumeratum.values.{StringCirceEnum, StringEnum, StringEnumEntry} + +sealed abstract class MetafieldType(val value: String) extends StringEnumEntry + +object MetafieldType extends StringEnum[MetafieldType] with StringCirceEnum[MetafieldType] { + case object Boolean extends MetafieldType("boolean") + case object Color extends MetafieldType("color") + case object Date extends MetafieldType("date") + case object DateTime extends MetafieldType("date_time") + case object Dimension extends MetafieldType("dimension") + case object Json extends MetafieldType("json") + case object Money extends MetafieldType("money") + case object MultiLineTextField extends MetafieldType("multi_line_text_field") + case object NumberDecimal extends MetafieldType("number_decimal") + case object NumberInteger extends MetafieldType("number_integer") + case object Rating extends MetafieldType("rating") + case object RichTextField extends MetafieldType("rich_text_field") + case object SingleLineTextField extends MetafieldType("single_line_text_field") + case object Url extends MetafieldType("url") + case object Volume extends MetafieldType("volume") + case object Weight extends MetafieldType("weight") + + case object CollectionReference extends MetafieldType("collection_reference") + case object FileReference extends MetafieldType("file_reference") + case object MetaobjectReference extends MetafieldType("metaobject_reference") + case object MixedReference extends MetafieldType("mixed_reference") + case object PageReference extends MetafieldType("page_reference") + case object ProductReference extends MetafieldType("product_reference") + case object VariantReference extends MetafieldType("variant_reference") + + case object ListCollectionReference extends MetafieldType("list.collection_reference") + case object ListColor extends 
MetafieldType("list.color") + case object ListDate extends MetafieldType("list.date") + case object ListDateTime extends MetafieldType("list.date_time") + case object ListDimension extends MetafieldType("list.dimension") + case object ListFileReference extends MetafieldType("list.file_reference") + case object ListMetaobjectReference extends MetafieldType("list.metaobject_reference") + case object ListMixedReference extends MetafieldType("list.mixed_reference") + case object ListNumberInteger extends MetafieldType("list.number_integer") + case object ListNumberDecimal extends MetafieldType("list.number_decimal") + case object ListPageReference extends MetafieldType("list.page_reference") + case object ListProductReference extends MetafieldType("list.product_reference") + case object ListRating extends MetafieldType("list.rating") + case object ListSingleLineTextField extends MetafieldType("list.single_line_text_field") + case object ListUrl extends MetafieldType("list.url") + case object ListVariantReference extends MetafieldType("list.variant_reference") + case object ListVolume extends MetafieldType("list.volume") + case object ListWeight extends MetafieldType("list.weight") + + val values = findValues +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Metaobject.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Metaobject.scala new file mode 100644 index 0000000..40d6d09 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Metaobject.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class Metaobject(id: String, + `type`: String, + fields: List[Metafield]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/MetaobjectDefinition.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/MetaobjectDefinition.scala new file mode 100644 index 0000000..79c5999 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/MetaobjectDefinition.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class MetaobjectDefinition(id: String, + `type`: String, + fieldDefinitions: List[Metafield]) diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Order.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Order.scala new file mode 100644 index 0000000..264bb99 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Order.scala @@ -0,0 +1,47 @@ +package com.harana.modules.shopify.models + +import java.time.Instant + +case class Order(id: Long, + email: String, + closedAt: Option[Instant], + createdAt: Instant, + updatedAt: Instant, + number: Int, + note: Option[String], + token: String, + totalPrice: BigDecimal, + subtotalPrice: BigDecimal, + totalWeight: Long, + totalTax: BigDecimal, + taxesIncluded: Boolean = false, + currency: Option[String], + financialStatus: String, + totalDiscounts: BigDecimal, + totalLineItemsPrice: BigDecimal, + cartToken: String, + buyerAcceptsMarketing: Boolean = false, + name: String, + referringSite: String, + landingSite: String, + cancelledAt: Option[Instant], + cancelReason: Option[String], + userId: Option[Long], + locationId: Option[Long], + processedAt: Instant, + browserIp: String, + orderNumber: Long, + processingMethod: String, + sourceName: String, + fulfillmentStatus: Option[String], + tags: String, + orderStatusUrl: String, + lineItems: List[LineItem], + fulfillments: List[Fulfilment], + billingAddress: Address, + shippingAddress: Option[Address], + 
customer: Customer, + shippingLines: List[ShippingLine], + taxLines: List[TaxLine], + noteAttributes: List[Attribute], + refunds: List[Refund]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Page.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Page.scala new file mode 100644 index 0000000..c55ee80 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Page.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class Page[T](previousUrl: Option[String], + nextUrl: Option[String], + items: List[T]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Product.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Product.scala new file mode 100644 index 0000000..401c797 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Product.scala @@ -0,0 +1,18 @@ +package com.harana.modules.shopify.models + +import java.time.Instant + +case class Product(id: Long, + title: String, + productType: String, + bodyHtml: String, + vendor: String, + tags: String, + options: List[ProductOption], + metafieldsGlobalTitleTag: Option[String], + metafieldsGlobalDescriptionTag: Option[String], + images: List[Image], + image: Image, + variants: List[ProductVariant], + publishedAt: Option[Instant], + published: Option[Boolean]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/ProductOption.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/ProductOption.scala new file mode 100644 index 0000000..9321aa2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/ProductOption.scala @@ -0,0 +1,7 @@ +package com.harana.modules.shopify.models + +case class ProductOption(id: Long, + productId: Long, + name: String, + position: Int, + values: List[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/ProductVariant.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/ProductVariant.scala new file mode 100644 index 0000000..872ba30 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/ProductVariant.scala @@ -0,0 +1,23 @@ +package com.harana.modules.shopify.models + +case class ProductVariant(id: Long, + productId: Long, + title: String, + price: BigDecimal, + compareAtPrice: Option[BigDecimal], + sku: Option[String], + barcode: Option[String], + position: Int, + grams: Long, + inventoryQuantity: Long, + imageId: Option[Long], + inventoryPolicy: Option[String], + inventoryManagement: Option[String], + option1: Option[String], + option2: Option[String], + option3: Option[String], + fulfillmentService: Option[String], + requiresShipping: Boolean, + taxable: Boolean, + inventoryItemId: Long, + available: Option[Long]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Refund.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Refund.scala new file mode 100644 index 0000000..97d74ac --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Refund.scala @@ -0,0 +1,14 @@ +package com.harana.modules.shopify.models + +import java.time.Instant + +case class Refund(id: Long, + orderId: Long, + createdAt: Instant, + note: Option[String], + userId: Option[Long] , + processedAt: Instant, + refundLineItems: List[RefundLineItem], + shipping: Option[RefundShippingDetails], + transactions: List[Transaction], + currency: Option[String]) \ No newline at end of file diff --git 
a/jvm/src/main/scala/com/harana/modules/shopify/models/RefundLineItem.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/RefundLineItem.scala new file mode 100644 index 0000000..2760dd6 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/RefundLineItem.scala @@ -0,0 +1,10 @@ +package com.harana.modules.shopify.models + +case class RefundLineItem(id: Long, + quantity: Long, + lineItemId: Long, + locationId: Option[Long], + restockType: String, + subtotal: BigDecimal, + totalTax: BigDecimal, + lineItem: LineItem) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/RefundShippingDetails.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/RefundShippingDetails.scala new file mode 100644 index 0000000..4facc2d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/RefundShippingDetails.scala @@ -0,0 +1,6 @@ +package com.harana.modules.shopify.models + +case class RefundShippingDetails(amount: BigDecimal, + tax: BigDecimal, + maximumRefundable: Option[BigDecimal], + fullRefund: Boolean) diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/ShippingLine.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/ShippingLine.scala new file mode 100644 index 0000000..46208ab --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/ShippingLine.scala @@ -0,0 +1,7 @@ +package com.harana.modules.shopify.models + +case class ShippingLine(id: Long, + title: String, + price: BigDecimal, + code: String, + source: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/ShopifyConnection.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/ShopifyConnection.scala new file mode 100644 index 0000000..610ec34 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/ShopifyConnection.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class ShopifyConnection(subdomain: String, + apiKey: String, + password: String) diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/TaxLine.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/TaxLine.scala new file mode 100644 index 0000000..bb35fa2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/TaxLine.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class TaxLine(title: String, + price: BigDecimal, + rate: BigDecimal) diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Transaction.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Transaction.scala new file mode 100644 index 0000000..05acdfd --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Transaction.scala @@ -0,0 +1,10 @@ +package com.harana.modules.shopify.models + +case class Transaction(orderId: Long, + kind: String, + gateway: String, + parentId: Long, + amount: BigDecimal, + currency: Option[String], + maximumRefundable: Option[BigDecimal], + receipt: TransactionReceipt) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/TransactionReceipt.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/TransactionReceipt.scala new file mode 100644 index 0000000..089e307 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/TransactionReceipt.scala @@ -0,0 +1,3 @@ +package com.harana.modules.shopify.models + +case class TransactionReceipt(applePay: Option[Boolean]) \ No newline at end of file diff --git 
a/jvm/src/main/scala/com/harana/modules/shopify/package.scala b/jvm/src/main/scala/com/harana/modules/shopify/package.scala new file mode 100644 index 0000000..fd45daf --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/package.scala @@ -0,0 +1,80 @@ +package com.harana.modules + +import com.harana.modules.shopify.models._ +import io.circe.generic.extras.Configuration +import io.circe.generic.extras.semiauto._ +import io.circe.{Decoder, Encoder} + +import java.time.Instant +import java.time.format.DateTimeFormatter +import scala.util.Try + +package object shopify { + + case class Output(productTitle: String, + variantTitle: String, + variantSku: String, + variantId: Long, + variantOption1: String, + variantOption2: String, + variantOption3: String, + month1Sales: String, + month2Sales: String, + month3Sales: String, + totalSales: String, + inventoryLevel: String) + + implicit val jsonConfig: Configuration = Configuration.default.withSnakeCaseMemberNames.withSnakeCaseConstructorNames.withDefaults + + implicit val encodeInstant: Encoder[Instant] = Encoder.encodeString.contramap[Instant](_.toString) + implicit val decodeInstant: Decoder[Instant] = Decoder.decodeString.emapTry { str => Try(Instant.from(DateTimeFormatter.ISO_OFFSET_DATE_TIME.parse(str))) } + + implicit val addressEncoder: Encoder[Address] = deriveConfiguredEncoder[Address] + implicit val addressDecoder: Decoder[Address] = deriveConfiguredDecoder[Address] + implicit val attributeEncoder: Encoder[Attribute] = deriveConfiguredEncoder[Attribute] + implicit val attributeDecoder: Decoder[Attribute] = deriveConfiguredDecoder[Attribute] + implicit val customerEncoder: Encoder[Customer] = deriveConfiguredEncoder[Customer] + implicit val customerDecoder: Decoder[Customer] = deriveConfiguredDecoder[Customer] + implicit val fulfilmentEncoder: Encoder[Fulfilment] = deriveConfiguredEncoder[Fulfilment] + implicit val fulfilmentDecoder: Decoder[Fulfilment] = deriveConfiguredDecoder[Fulfilment] + implicit val imageEncoder: Encoder[Image] = deriveConfiguredEncoder[Image] + implicit val imageDecoder: Decoder[Image] = deriveConfiguredDecoder[Image] + implicit val inventoryLevelEncoder: Encoder[InventoryLevel] = deriveConfiguredEncoder[InventoryLevel] + implicit val inventoryLevelDecoder: Decoder[InventoryLevel] = deriveConfiguredDecoder[InventoryLevel] + implicit val inventoryPolicyEncoder: Encoder[InventoryPolicy] = deriveConfiguredEncoder[InventoryPolicy] + implicit val inventoryPolicyDecoder: Decoder[InventoryPolicy] = deriveConfiguredDecoder[InventoryPolicy] + implicit val lineItemEncoder: Encoder[LineItem] = deriveConfiguredEncoder[LineItem] + implicit val lineItemDecoder: Decoder[LineItem] = deriveConfiguredDecoder[LineItem] + implicit val locationEncoder: Encoder[Location] = deriveConfiguredEncoder[Location] + implicit val locationDecoder: Decoder[Location] = deriveConfiguredDecoder[Location] + implicit val metafieldEncoder: Encoder[Metafield] = deriveConfiguredEncoder[Metafield] + implicit val metafieldDecoder: Decoder[Metafield] = deriveConfiguredDecoder[Metafield] + implicit val metaobjectEncoder: Encoder[Metaobject] = deriveConfiguredEncoder[Metaobject] + implicit val metaobjectDecoder: Decoder[Metaobject] = deriveConfiguredDecoder[Metaobject] + implicit val metaobjectDefinitionEncoder: Encoder[MetaobjectDefinition] = deriveConfiguredEncoder[MetaobjectDefinition] + implicit val metaobjectDefinitionDecoder: Decoder[MetaobjectDefinition] = deriveConfiguredDecoder[MetaobjectDefinition] + implicit val metafieldTypeEncoder: 
Encoder[MetafieldType] = deriveConfiguredEncoder[MetafieldType] + implicit val metafieldTypeDecoder: Decoder[MetafieldType] = deriveConfiguredDecoder[MetafieldType] + implicit val orderEncoder: Encoder[Order] = deriveConfiguredEncoder[Order] + implicit val orderDecoder: Decoder[Order] = deriveConfiguredDecoder[Order] + implicit val productEncoder: Encoder[Product] = deriveConfiguredEncoder[Product] + implicit val productDecoder: Decoder[Product] = deriveConfiguredDecoder[Product] + implicit val productOptionEncoder: Encoder[ProductOption] = deriveConfiguredEncoder[ProductOption] + implicit val productOptionDecoder: Decoder[ProductOption] = deriveConfiguredDecoder[ProductOption] + implicit val productVariantEncoder: Encoder[ProductVariant] = deriveConfiguredEncoder[ProductVariant] + implicit val productVariantDecoder: Decoder[ProductVariant] = deriveConfiguredDecoder[ProductVariant] + implicit val refundEncoder: Encoder[Refund] = deriveConfiguredEncoder[Refund] + implicit val refundDecoder: Decoder[Refund] = deriveConfiguredDecoder[Refund] + implicit val refundLineItemEncoder: Encoder[RefundLineItem] = deriveConfiguredEncoder[RefundLineItem] + implicit val refundLineItemDecoder: Decoder[RefundLineItem] = deriveConfiguredDecoder[RefundLineItem] + implicit val refundShippingDetailsEncoder: Encoder[RefundShippingDetails] = deriveConfiguredEncoder[RefundShippingDetails] + implicit val refundShippingDetailsDecoder: Decoder[RefundShippingDetails] = deriveConfiguredDecoder[RefundShippingDetails] + implicit val shippingLineEncoder: Encoder[ShippingLine] = deriveConfiguredEncoder[ShippingLine] + implicit val shippingLineDecoder: Decoder[ShippingLine] = deriveConfiguredDecoder[ShippingLine] + implicit val taxLineEncoder: Encoder[TaxLine] = deriveConfiguredEncoder[TaxLine] + implicit val taxLineDecoder: Decoder[TaxLine] = deriveConfiguredDecoder[TaxLine] + implicit val transactionEncoder: Encoder[Transaction] = deriveConfiguredEncoder[Transaction] + implicit val transactionDecoder: Decoder[Transaction] = deriveConfiguredDecoder[Transaction] + implicit val transactionReceiptEncoder: Encoder[TransactionReceipt] = deriveConfiguredEncoder[TransactionReceipt] + implicit val transactionReceiptDecoder: Decoder[TransactionReceipt] = deriveConfiguredDecoder[TransactionReceipt] +} diff --git a/jvm/src/main/scala/com/harana/modules/shopify_app/LiveShopifyApp.scala b/jvm/src/main/scala/com/harana/modules/shopify_app/LiveShopifyApp.scala new file mode 100644 index 0000000..f801522 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify_app/LiveShopifyApp.scala @@ -0,0 +1,188 @@ +package com.harana.modules.shopify_app + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.shopify.ShopifySdk +import com.shopify.model._ +import org.joda.time.DateTime +import zio.{Task, ZIO, ZLayer} + +import java.util.concurrent.atomic.AtomicReference +import scala.jdk.CollectionConverters._ + +object LiveShopifyApp { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveShopifyApp(config, logger, micrometer) + } +} + +case class LiveShopifyApp(config: Config, logger: Logger, micrometer: Micrometer) extends ShopifyApp { + + def activateRecurringApplicationCharge(subdomain: String, accessToken: String, chargeId: String): Task[ShopifyRecurringApplicationCharge] = + sdk(subdomain, 
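The package object configures circe generic-extras with `withSnakeCaseMemberNames`, so Shopify's snake_case JSON maps onto the camelCase case classes above. A small round-trip sketch using `InventoryLevel`, assuming these implicits are in scope via the package import:

```scala
import com.harana.modules.shopify._                     // brings the implicit codecs
import com.harana.modules.shopify.models.InventoryLevel
import io.circe.parser.decode
import io.circe.syntax.EncoderOps

object CodecExample {
  val json = """{ "inventory_item_id": 42, "location_id": 7, "available": 3 }"""

  // snake_case keys decode into the camelCase fields of the case class
  val level: Either[io.circe.Error, InventoryLevel] = decode[InventoryLevel](json)
  // Right(InventoryLevel(42, Some(7), 3))

  val encoded = InventoryLevel(42L, Some(7L), 3L).asJson.noSpaces
  // roughly: {"inventory_item_id":42,"location_id":7,"available":3}
}
```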
accessToken).map(_.activateRecurringApplicationCharge(chargeId)) + + def cancelFulfillment(subdomain: String, accessToken: String, orderId: String, fulfillmentId: String): Task[ShopifyFulfillment] = + sdk(subdomain, accessToken).map(_.cancelFulfillment(orderId, fulfillmentId)) + + def cancelOrder(subdomain: String, accessToken: String, orderId: String, reason: String): Task[ShopifyOrder] = + sdk(subdomain, accessToken).map(_.cancelOrder(orderId, reason)) + + def closeOrder(subdomain: String, accessToken: String, orderId: String): Task[ShopifyOrder] = + sdk(subdomain, accessToken).map(_.closeOrder(orderId)) + + def createCustomCollection(subdomain: String, accessToken: String, request: ShopifyCustomCollectionCreationRequest): Task[ShopifyCustomCollection] = + sdk(subdomain, accessToken).map(_.createCustomCollection(request)) + + def createFulfillment(subdomain: String, accessToken: String, request: ShopifyFulfillmentCreationRequest): Task[ShopifyFulfillment] = + sdk(subdomain, accessToken).map(_.createFulfillment(request)) + + def createGiftCard(subdomain: String, accessToken: String, request: ShopifyGiftCardCreationRequest): Task[ShopifyGiftCard] = + sdk(subdomain, accessToken).map(_.createGiftCard(request)) + + def createOrder(subdomain: String, accessToken: String, request: ShopifyOrderCreationRequest): Task[ShopifyOrder] = + sdk(subdomain, accessToken).map(_.createOrder(request)) + + def createProduct(subdomain: String, accessToken: String, request: ShopifyProductCreationRequest): Task[ShopifyProduct] = + sdk(subdomain, accessToken).map(_.createProduct(request)) + + def createProductMetafield(subdomain: String, accessToken: String, request: ShopifyProductMetafieldCreationRequest): Task[Metafield] = + sdk(subdomain, accessToken).map(_.createProductMetafield(request)) + + def createRecurringApplicationCharge(subdomain: String, accessToken: String, request: ShopifyRecurringApplicationChargeCreationRequest): Task[ShopifyRecurringApplicationCharge] = + sdk(subdomain, accessToken).map(_.createRecurringApplicationCharge(request)) + + def createVariantMetafield(subdomain: String, accessToken: String, request: ShopifyVariantMetafieldCreationRequest): Task[Metafield] = + sdk(subdomain, accessToken).map(_.createVariantMetafield(request)) + + def deleteProduct(subdomain: String, accessToken: String, productId: String): Task[Boolean] = + sdk(subdomain, accessToken).map(_.deleteProduct(productId)) + + def getAccessToken(subdomain: String, accessToken: String): Task[String] = + sdk(subdomain, accessToken).map(_.getAccessToken) + + def getCustomCollections(subdomain: String, accessToken: String, pageInfo: String, pageSize: Int): Task[ShopifyPage[ShopifyCustomCollection]] = + sdk(subdomain, accessToken).map(_.getCustomCollections(pageInfo, pageSize)) + + def getCustomCollections(subdomain: String, accessToken: String, pageSize: Int): Task[ShopifyPage[ShopifyCustomCollection]] = + sdk(subdomain, accessToken).map(_.getCustomCollections(pageSize)) + + def getCustomCollections(subdomain: String, accessToken: String): Task[List[ShopifyCustomCollection]] = + sdk(subdomain, accessToken).map(_.getCustomCollections.asScala.toList) + + def getCustomer(subdomain: String, accessToken: String, customerId: String): Task[ShopifyCustomer] = + sdk(subdomain, accessToken).map(_.getCustomer(customerId)) + + def getCustomers(subdomain: String, accessToken: String, request: ShopifyGetCustomersRequest): Task[ShopifyPage[ShopifyCustomer]] = + sdk(subdomain, accessToken).map(_.getCustomers(request)) + + def 
getLocations(subdomain: String, accessToken: String): Task[List[ShopifyLocation]] = + sdk(subdomain, accessToken).map(_.getLocations.asScala.toList) + + def getOrder(subdomain: String, accessToken: String, orderId: String): Task[ShopifyOrder] = + sdk(subdomain, accessToken).map(_.getOrder(orderId)) + + def getOrderMetafields(subdomain: String, accessToken: String, orderId: String): Task[List[Metafield]] = + sdk(subdomain, accessToken).map(_.getOrderMetafields(orderId).asScala.toList) + + def getOrderRisks(subdomain: String, accessToken: String, orderId: String): Task[List[ShopifyOrderRisk]] = + sdk(subdomain, accessToken).map(_.getOrderRisks(orderId).asScala.toList) + + def getOrderTransactions(subdomain: String, accessToken: String, orderId: String): Task[List[ShopifyTransaction]] = + sdk(subdomain, accessToken).map(_.getOrderTransactions(orderId).asScala.toList) + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(minimumCreationDate)) + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(minimumCreationDate, maximumCreationDate)) + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime, appId: String): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(minimumCreationDate, maximumCreationDate, appId)) + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime, appId: String, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(minimumCreationDate, maximumCreationDate, appId, pageSize)) + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(minimumCreationDate, maximumCreationDate, pageSize)) + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(minimumCreationDate, pageSize)) + + def getOrders(subdomain: String, accessToken: String, pageInfo: String, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(pageInfo, pageSize)) + + def getOrders(subdomain: String, accessToken: String, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(pageSize)) + + def getOrders(subdomain: String, accessToken: String): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders) + + def getProduct(subdomain: String, accessToken: String, productId: String): Task[ShopifyProduct] = + sdk(subdomain, accessToken).map(_.getProduct(productId)) + + def getProductCount(subdomain: String, accessToken: String): Task[Int] = + sdk(subdomain, accessToken).map(_.getProductCount) + + def getProductMetafields(subdomain: String, accessToken: String, productId: String): Task[List[Metafield]] = + sdk(subdomain, accessToken).map(_.getProductMetafields(productId).asScala.toList) + + def getProducts(subdomain: String, accessToken: String, pageInfo: String, pageSize: Int): Task[ShopifyPage[ShopifyProduct]] = + sdk(subdomain, accessToken).map(_.getProducts(pageInfo, pageSize)) + + def 
getProducts(subdomain: String, accessToken: String, pageSize: Int): Task[ShopifyPage[ShopifyProduct]] = + sdk(subdomain, accessToken).map(_.getProducts(pageSize)) + + def getProducts(subdomain: String, accessToken: String): Task[ShopifyProducts] = + sdk(subdomain, accessToken).map(_.getProducts) + + def getRecurringApplicationCharge(subdomain: String, accessToken: String, chargeId: String): Task[ShopifyRecurringApplicationCharge] = + sdk(subdomain, accessToken).map(_.getRecurringApplicationCharge(chargeId)) + + def getShop(subdomain: String, accessToken: String): Task[ShopifyShop] = + sdk(subdomain, accessToken).map(_.getShop) + + def getUpdatedOrdersCreatedBefore(subdomain: String, accessToken: String, minimumUpdatedAtDate: DateTime, maximumUpdatedAtDate: DateTime, maximumCreatedAtDate: DateTime, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getUpdatedOrdersCreatedBefore(minimumUpdatedAtDate, maximumUpdatedAtDate, maximumCreatedAtDate, pageSize)) + + def getVariant(subdomain: String, accessToken: String, variantId: String): Task[ShopifyVariant] = + sdk(subdomain, accessToken).map(_.getVariant(variantId)) + + def getVariantMetafields(subdomain: String, accessToken: String, variantId: String): Task[List[Metafield]] = + sdk(subdomain, accessToken).map(_.getVariantMetafields(variantId).asScala.toList) + + def refund(subdomain: String, accessToken: String, request: ShopifyRefundCreationRequest): Task[ShopifyRefund] = + sdk(subdomain, accessToken).map(_.refund(request)) + + def revokeOAuthToken(subdomain: String, accessToken: String): Task[Boolean] = + sdk(subdomain, accessToken).map(_.revokeOAuthToken) + + def searchCustomers(subdomain: String, accessToken: String, query: String): Task[ShopifyPage[ShopifyCustomer]] = + sdk(subdomain, accessToken).map(_.searchCustomers(query)) + + def updateCustomer(subdomain: String, accessToken: String, request: ShopifyCustomerUpdateRequest): Task[ShopifyCustomer] = + sdk(subdomain, accessToken).map(_.updateCustomer(request)) + + def updateFulfillment(subdomain: String, accessToken: String, request: ShopifyFulfillmentUpdateRequest): Task[ShopifyFulfillment] = + sdk(subdomain, accessToken).map(_.updateFulfillment(request)) + + def updateInventoryLevel(subdomain: String, accessToken: String, inventoryItemId: String, locationId: String, quantity: Long): Task[ShopifyInventoryLevel] = + sdk(subdomain, accessToken).map(_.updateInventoryLevel(inventoryItemId, locationId, quantity)) + + def updateOrderShippingAddress(subdomain: String, accessToken: String, request: ShopifyOrderShippingAddressUpdateRequest): Task[ShopifyOrder] = + sdk(subdomain, accessToken).map(_.updateOrderShippingAddress(request)) + + def updateProduct(subdomain: String, accessToken: String, request: ShopifyProductUpdateRequest): Task[ShopifyProduct] = + sdk(subdomain, accessToken).map(_.updateProduct(request)) + + def updateVariant(subdomain: String, accessToken: String, request: ShopifyVariantUpdateRequest): Task[ShopifyVariant] = + sdk(subdomain, accessToken).map(_.updateVariant(request)) + + private def sdk(subdomain: String, accessToken: String): Task[ShopifySdk] = + ZIO.attempt(ShopifySdk.newBuilder().withSubdomain(subdomain).withAccessToken(accessToken).build()) + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify_app/ShopifyApp.scala b/jvm/src/main/scala/com/harana/modules/shopify_app/ShopifyApp.scala new file mode 100644 index 0000000..0f326a5 --- /dev/null +++ 
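Unlike the hand-rolled `shopify` module, `shopify_app` delegates to the `ShopifySdk` Java client and builds a fresh client per call from the shop subdomain and OAuth access token. A usage sketch via `ZIO.serviceWithZIO`, with placeholder credentials and assuming `LiveShopifyApp.layer` is provided:

```scala
import com.harana.modules.shopify_app.ShopifyApp
import com.shopify.model.{ShopifyOrder, ShopifyPage}
import zio.ZIO

object ShopifyAppExample {
  // Fetch one page of up to 50 orders; a real access token comes from the OAuth flow.
  val recentOrders: ZIO[ShopifyApp, Throwable, ShopifyPage[ShopifyOrder]] =
    ZIO.serviceWithZIO[ShopifyApp](_.getOrders("my-shop", "access-token", 50))
}
```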
b/jvm/src/main/scala/com/harana/modules/shopify_app/ShopifyApp.scala @@ -0,0 +1,115 @@ +package com.harana.modules.shopify_app + +import com.shopify.model._ +import org.joda.time.DateTime +import zio.Task +import zio.macros.accessible + +@accessible +trait ShopifyApp { + def activateRecurringApplicationCharge(subdomain: String, accessToken: String, chargeId: String): Task[ShopifyRecurringApplicationCharge] + + def cancelFulfillment(subdomain: String, accessToken: String, orderId: String, fulfillmentId: String): Task[ShopifyFulfillment] + + def cancelOrder(subdomain: String, accessToken: String, orderId: String, reason: String): Task[ShopifyOrder] + + def closeOrder(subdomain: String, accessToken: String, orderId: String): Task[ShopifyOrder] + + def createCustomCollection(subdomain: String, accessToken: String, request: ShopifyCustomCollectionCreationRequest): Task[ShopifyCustomCollection] + + def createFulfillment(subdomain: String, accessToken: String, request: ShopifyFulfillmentCreationRequest): Task[ShopifyFulfillment] + + def createGiftCard(subdomain: String, accessToken: String, request: ShopifyGiftCardCreationRequest): Task[ShopifyGiftCard] + + def createOrder(subdomain: String, accessToken: String, request: ShopifyOrderCreationRequest): Task[ShopifyOrder] + + def createProduct(subdomain: String, accessToken: String, request: ShopifyProductCreationRequest): Task[ShopifyProduct] + + def createProductMetafield(subdomain: String, accessToken: String, request: ShopifyProductMetafieldCreationRequest): Task[Metafield] + + def createRecurringApplicationCharge(subdomain: String, accessToken: String, request: ShopifyRecurringApplicationChargeCreationRequest): Task[ShopifyRecurringApplicationCharge] + + def createVariantMetafield(subdomain: String, accessToken: String, request: ShopifyVariantMetafieldCreationRequest): Task[Metafield] + + def deleteProduct(subdomain: String, accessToken: String, productId: String): Task[Boolean] + + def getAccessToken(subdomain: String, accessToken: String): Task[String] + + def getCustomCollections(subdomain: String, accessToken: String, pageInfo: String, pageSize: Int): Task[ShopifyPage[ShopifyCustomCollection]] + + def getCustomCollections(subdomain: String, accessToken: String, pageSize: Int): Task[ShopifyPage[ShopifyCustomCollection]] + + def getCustomCollections(subdomain: String, accessToken: String): Task[List[ShopifyCustomCollection]] + + def getCustomer(subdomain: String, accessToken: String, customerId: String): Task[ShopifyCustomer] + + def getCustomers(subdomain: String, accessToken: String, request: ShopifyGetCustomersRequest): Task[ShopifyPage[ShopifyCustomer]] + + def getLocations(subdomain: String, accessToken: String): Task[List[ShopifyLocation]] + + def getOrder(subdomain: String, accessToken: String, orderId: String): Task[ShopifyOrder] + + def getOrderMetafields(subdomain: String, accessToken: String, orderId: String): Task[List[Metafield]] + + def getOrderRisks(subdomain: String, accessToken: String, orderId: String): Task[List[ShopifyOrderRisk]] + + def getOrderTransactions(subdomain: String, accessToken: String, orderId: String): Task[List[ShopifyTransaction]] + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: 
DateTime, appId: String): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime, appId: String, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, pageInfo: String, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String): Task[ShopifyPage[ShopifyOrder]] + + def getProduct(subdomain: String, accessToken: String, productId: String): Task[ShopifyProduct] + + def getProductCount(subdomain: String, accessToken: String): Task[Int] + + def getProductMetafields(subdomain: String, accessToken: String, productId: String): Task[List[Metafield]] + + def getProducts(subdomain: String, accessToken: String, pageInfo: String, pageSize: Int): Task[ShopifyPage[ShopifyProduct]] + + def getProducts(subdomain: String, accessToken: String, pageSize: Int): Task[ShopifyPage[ShopifyProduct]] + + def getProducts(subdomain: String, accessToken: String): Task[ShopifyProducts] + + def getRecurringApplicationCharge(subdomain: String, accessToken: String, chargeId: String): Task[ShopifyRecurringApplicationCharge] + + def getShop(subdomain: String, accessToken: String): Task[ShopifyShop] + + def getUpdatedOrdersCreatedBefore(subdomain: String, accessToken: String, minimumUpdatedAtDate: DateTime, maximumUpdatedAtDate: DateTime, maximumCreatedAtDate: DateTime, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] + + def getVariant(subdomain: String, accessToken: String, variantId: String): Task[ShopifyVariant] + + def getVariantMetafields(subdomain: String, accessToken: String, variantId: String): Task[List[Metafield]] + + def refund(subdomain: String, accessToken: String, request: ShopifyRefundCreationRequest): Task[ShopifyRefund] + + def revokeOAuthToken(subdomain: String, accessToken: String): Task[Boolean] + + def searchCustomers(subdomain: String, accessToken: String, query: String): Task[ShopifyPage[ShopifyCustomer]] + + def updateCustomer(subdomain: String, accessToken: String, request: ShopifyCustomerUpdateRequest): Task[ShopifyCustomer] + + def updateFulfillment(subdomain: String, accessToken: String, request: ShopifyFulfillmentUpdateRequest): Task[ShopifyFulfillment] + + def updateInventoryLevel(subdomain: String, accessToken: String, inventoryItemId: String, locationId: String, quantity: Long): Task[ShopifyInventoryLevel] + + def updateOrderShippingAddress(subdomain: String, accessToken: String, request: ShopifyOrderShippingAddressUpdateRequest): Task[ShopifyOrder] + + def updateProduct(subdomain: String, accessToken: String, request: ShopifyProductUpdateRequest): Task[ShopifyProduct] + + def updateVariant(subdomain: String, accessToken: String, request: ShopifyVariantUpdateRequest): Task[ShopifyVariant] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/slack/LiveSlack.scala b/jvm/src/main/scala/com/harana/modules/slack/LiveSlack.scala new file mode 100644 index 0000000..2575134 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/slack/LiveSlack.scala @@ -0,0 +1,599 @@ +package 
com.harana.modules.slack + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.hubspot.slack.client.methods.params.auth.AuthRevokeParams +import com.hubspot.slack.client.methods.params.channels._ +import com.hubspot.slack.client.methods.params.chat._ +import com.hubspot.slack.client.methods.params.conversations._ +import com.hubspot.slack.client.methods.params.dialog.DialogOpenParams +import com.hubspot.slack.client.methods.params.files.{FilesSharedPublicUrlParams, FilesUploadParams} +import com.hubspot.slack.client.methods.params.group.{GroupsKickParams, GroupsListParams} +import com.hubspot.slack.client.methods.params.im.ImOpenParams +import com.hubspot.slack.client.methods.params.reactions.ReactionsAddParams +import com.hubspot.slack.client.methods.params.search.SearchMessagesParams +import com.hubspot.slack.client.methods.params.usergroups._ +import com.hubspot.slack.client.methods.params.usergroups.users.UsergroupUsersUpdateParams +import com.hubspot.slack.client.methods.params.users.{UserEmailParams, UsersInfoParams, UsersListParams} +import com.hubspot.slack.client.methods.{ResultSort, ResultSortOrder} +import com.hubspot.slack.client.models.conversations.{Conversation, ConversationType} +import com.hubspot.slack.client.models.dialog.SlackDialog +import com.hubspot.slack.client.models.files.SlackFile +import com.hubspot.slack.client.models.group.SlackGroup +import com.hubspot.slack.client.models.response.auth.AuthTestResponse +import com.hubspot.slack.client.models.response.{MessagePage, SlackError} +import com.hubspot.slack.client.models.teams.SlackTeam +import com.hubspot.slack.client.models.usergroups.SlackUsergroup +import com.hubspot.slack.client.models.users.SlackUser +import com.hubspot.slack.client.models.{Attachment, LiteMessage, SlackChannel} +import com.hubspot.slack.client.{SlackClient, SlackClientFactory, SlackClientRuntimeConfig} +import zio.{IO, ZIO, ZLayer} + +import scala.jdk.CollectionConverters._ + +object LiveSlack { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveSlack(config, logger, micrometer) + } +} + +case class LiveSlack(config: Config, logger: Logger, micrometer: Micrometer) extends Slack { + + def newClient(token: String): IO[Nothing, SlackClient] = { + val config = SlackClientRuntimeConfig.builder() + .setTokenSupplier(() => token) + .build() + ZIO.succeed(SlackClientFactory.defaultFactory().build(config)) + } + + + def Auth(client: SlackClient): IO[Either[SlackError, Throwable], AuthTestResponse] = + client.testAuth() + + + def revokeAuth(client: SlackClient): IO[Either[SlackError, Throwable], Boolean] = + client.revokeAuth(AuthRevokeParams.builder().build()).map(_.isRevoked) + + + def searchMessages(client: SlackClient, + count: Int, + page: Int, + query: String, + shouldHighlight: Boolean, + sort: ResultSort, + sortOrder: ResultSortOrder): IO[Either[SlackError, Throwable], MessagePage] = + client.searchMessages( + SearchMessagesParams.builder() + .setCount(count) + .setPage(page) + .setQuery(query) + .setShouldHighlight(shouldHighlight) + .setSort(sort) + .setSortOrder(sortOrder) + .build() + ).map(_.getMessages) + + + def findReplies(client: SlackClient, + channelId: String, + threadTs: String): IO[Either[SlackError, Throwable], List[LiteMessage]] = + client.findReplies( + FindRepliesParams.builder() + .setChannelId(channelId) + 
.setThreadTs(threadTs) + .build() + ).map(_.getMessages.asScala.toList) + + + def findUser(client: SlackClient, + userId: String, + includeLocale: Boolean): IO[Either[SlackError, Throwable], SlackUser] = + client.findUser( + UsersInfoParams.builder() + .setIncludeLocale(includeLocale) + .setUserId(userId) + .build() + ).map(_.getUser) + + + def lookupUserByEmail(client: SlackClient, + email: String): IO[Either[SlackError, Throwable], SlackUser] = + client.lookupUserByEmail( + UserEmailParams.builder() + .setEmail(email) + .build() + ).map(_.getUser) + + + def listUsers(client: SlackClient): IO[Either[SlackError, Throwable], List[SlackUser]] = + client.listUsers() + + + def listUsersPaginated(client: SlackClient, + cursor: Option[String], + limit: Option[Int]): IO[Either[SlackError, Throwable], List[SlackUser]] = + client.listUsersPaginated( + UsersListParams.builder() + .setCursor(cursor) + .setLimit(limit) + .build() + ).map(_.getMembers.asScala.toList) + + + def listChannels(client: SlackClient, + cursor: Option[String], + limit: Option[Int], + shouldExcludeArchived: Boolean, + shouldExcludeMembers: Boolean): IO[Either[SlackError, Throwable], List[SlackChannel]] = + client.listChannels( + ChannelsListParams.builder() + .setCursor(cursor) + .setLimit(limit) + .setShouldExcludeArchived(shouldExcludeArchived) + .setShouldExcludeMembers(shouldExcludeMembers) + .build() + ) + + + def channelHistory(client: SlackClient, + channelId: String, + count: Option[Int], + inclusive: Boolean): IO[Either[SlackError, Throwable], List[LiteMessage]] = + client.channelHistory( + ChannelsHistoryParams.builder() + .setChannelId(channelId) + .setCount(count) + .setInclusive(inclusive) + .build() + ) + + + def getChannelByName(client: SlackClient, + channelName: String, + shouldExcludeArchived: Boolean, + shouldExcludeMembers: Boolean): IO[Either[SlackError, Throwable], SlackChannel] = + client.getChannelByName( + channelName, + ChannelsFilter.builder() + .setShouldExcludeArchived(shouldExcludeArchived) + .setShouldExcludeMembers(shouldExcludeMembers) + .build() + ) + + + def getChannelInfo(client: SlackClient, + channelId: String, + includeLocale: Boolean): IO[Either[SlackError, Throwable], SlackChannel] = + client.getChannelInfo( + ChannelsInfoParams.builder() + .setChannelId(channelId) + .setIncludeLocale(includeLocale) + .build() + ).map(_.getChannel) + + + def kickUserFromChannel(client: SlackClient, + channelId: String, + userId: String): IO[Either[SlackError, Throwable], Unit] = + client.kickUserFromChannel( + ChannelsKickParams.builder() + .setChannelId(channelId) + .setUserId(userId) + .build() + ).unit + + + def listGroups(client: SlackClient, + shouldExcludeArchived: Boolean, + shouldExcludeMembers: Boolean): IO[Either[SlackError, Throwable], List[SlackGroup]] = + client.listGroups( + GroupsListParams.builder() + .setShouldExcludeArchived(shouldExcludeArchived) + .setShouldExcludeMembers(shouldExcludeMembers) + .build()) + + + def kickUserFromGroup(client: SlackClient, + channelId: String, + userId: String): IO[Either[SlackError, Throwable], Unit] = + client.kickUserFromGroup( + GroupsKickParams.builder() + .setChannelId(channelId) + .setUserId(userId) + .build() + ).unit + + + def openIm(client: SlackClient, + includeLocale: Boolean, + returnIm: Boolean, + userId: String): IO[Either[SlackError, Throwable], Unit] = + client.openIm( + ImOpenParams.builder() + .setIncludeLocale(includeLocale) + .setReturnIm(returnIm) + .setUserId(userId) + .build() + ).unit + + + def postMessage(client: SlackClient, + 
asUser: Boolean, + attachments: List[Attachment], + channelId: String, + iconEmoji: Option[String], + iconUrl: Option[String], + replyBroadcast: Boolean, + shouldLinkNames: Boolean, + text: Option[String], + threadTs: Option[String]): IO[Either[SlackError, Throwable], String] = + client.postMessage( + ChatPostMessageParams.builder() + .setAsUser(asUser) + .setAttachments(attachments.asJava) + .setChannelId(channelId) + .setIconEmoji(iconEmoji) + .setIconUrl(iconUrl) + .setLinkNames(shouldLinkNames) + .setReplyBroadcast(replyBroadcast) + .setText(text) + .setThreadTs(threadTs) + .build() + ).map(_.getTs) + + + def postEphemeralMessage(client: SlackClient, + attachments: List[Attachment], + channelId: String, + parseMode: String, + sendAsUser: Boolean, + shouldLinkNames: Boolean, + text: Option[String], + threadTs: Option[String], + userToSendTo: String): IO[Either[SlackError, Throwable], String] = + client.postEphemeralMessage( + ChatPostEphemeralMessageParams.builder() + .setAttachments(attachments.asJava) + .setChannelId(channelId) + .setParseMode(parseMode) + .setSendAsUser(sendAsUser) + .setShouldLinkNames(shouldLinkNames) + .setText(text) + .setThreadTs(threadTs) + .setUserToSendTo(userToSendTo) + .build() + ).map(_.getMessageTs) + + + def updateMessage(client: SlackClient, + asUser: Boolean, + attachments: List[Attachment], + channelId: String, + parse: String, + shouldLinkNames: Boolean, + text: Option[String], + ts: String): IO[Either[SlackError, Throwable], Unit] = + client.updateMessage( + ChatUpdateMessageParams.builder() + .setAsUser(asUser) + .setAttachments(attachments.asJava) + .setChannelId(channelId) + .setParse(parse) + .setShouldLinkNames(shouldLinkNames) + .setText(text) + .setTs(ts) + .build() + ).unit + + + def getPermalink(client: SlackClient, + channelId: String, + messageTs: String): IO[Either[SlackError, Throwable], String] = + client.getPermalink( + ChatGetPermalinkParams.builder() + .setChannelId(channelId) + .setMessageTs(messageTs) + .build() + ).map(_.getPermalink) + + + def deleteMessage(client: SlackClient, + asUser: Boolean, + channelId: String, + messageToDeleteTs: String): IO[Either[SlackError, Throwable], Unit] = + client.deleteMessage( + ChatDeleteParams.builder() + .setAsUser(asUser) + .setChannelId(channelId) + .setMessageToDeleteTs(messageToDeleteTs) + .build() + ).unit + + + def listConversations(client: SlackClient, + conversationTypes: List[ConversationType], + cursor: Option[String], + limit: Option[Int], + shouldExcludeArchived: Boolean): IO[Either[SlackError, Throwable], List[Conversation]] = + client.listConversations( + ConversationsListParams.builder() + .setConversationTypes(conversationTypes.asJava) + .setCursor(cursor) + .setLimit(limit) + .setShouldExcludeArchived(shouldExcludeArchived) + .build()) + + + def usersConversations(client: SlackClient, + cursor: Option[String], + limit: Option[Int], + shouldExcludeArchived: Boolean, + userId: Option[String]): IO[Either[SlackError, Throwable], List[Conversation]] = + client.usersConversations( + ConversationsUserParams.builder() + .setCursor(cursor) + .setLimit(limit) + .setShouldExcludeArchived(shouldExcludeArchived) + .setUserId(userId) + .build() + ) + + + def createConversation(client: SlackClient, + isPrivate: Boolean, + name: String): IO[Either[SlackError, Throwable], Unit] = + client.createConversation( + ConversationCreateParams.builder() + .setIsPrivate(isPrivate) + .setName(name) + .build() + ).unit + + + def inviteToConversation(client: SlackClient, + channelId: String, + users: 
List[String]): IO[Either[SlackError, Throwable], Unit] = + client.inviteToConversation( + ConversationInviteParams.builder() + .setChannelId(channelId) + .setUsers(users.asJava) + .build() + ).unit + + + def unarchiveConversation(client: SlackClient, channelId: String): IO[Either[SlackError, Throwable], Unit] = + client.unarchiveConversation( + ConversationUnarchiveParams.builder() + .setChannelId(channelId) + .build() + ).unit + + + def getConversationHistory(client: SlackClient, + channelId: String, + inclusive: Boolean, + limit: Option[Int], + newestTimestamp: Option[String], + oldestTimestamp: Option[String]): IO[Either[SlackError, Throwable], List[LiteMessage]] = + client.getConversationHistory( + ConversationsHistoryParams.builder() + .setChannelId(channelId) + .setInclusive(inclusive) + .setLimit(limit) + .setNewestTimestamp(newestTimestamp) + .setOldestTimestamp(oldestTimestamp) + .build() + ) + + + def archiveConversation(client: SlackClient, + channelId: String): IO[Either[SlackError, Throwable], Unit] = + client.archiveConversation( + ConversationArchiveParams.builder() + .setChannelId(channelId) + .build() + ).unit + + + def getConversationInfo(client: SlackClient, + conversationId: String, + includeLocale: Boolean): IO[Either[SlackError, Throwable], Conversation] = + client.getConversationInfo( + ConversationsInfoParams.builder() + .setConversationId(conversationId) + .setIncludeLocale(includeLocale) + .build() + ).map(_.getConversation) + + + def getConversationReplies(client: SlackClient, + channel: String, + ts: String): IO[Either[SlackError, Throwable], List[LiteMessage]] = + client.getConversationReplies( + ConversationsRepliesParams.builder() + .setChannel(channel) + .setTs(ts) + .build() + ).map(_.getMessages.asScala.toList) + + + def getConversationByName(client: SlackClient, + conversationName: String, + conversationTypes: List[ConversationType], + shouldExcludeArchived: Boolean): IO[Either[SlackError, Throwable], Conversation] = + client.getConversationByName( + conversationName, + ConversationsFilter.builder() + .setConversationTypes(conversationTypes.asJava) + .setShouldExcludeArchived(shouldExcludeArchived) + .build() + ).map(c => Conversation.builder().setId(c.getId).setName(c.getName).build()) + + + def openConversation(client: SlackClient, + channelId: Option[String], + returnIm: Boolean, + users: List[String]): IO[Either[SlackError, Throwable], Conversation] = + client.openConversation( + ConversationOpenParams.builder() + .setChannelId(channelId) + .setReturnIm(returnIm) + .setUsers(users.asJava) + .build() + ).map(_.getConversation) + + + // Usergroups + def createUsergroup(client: SlackClient, + description: Option[String], + handle: Option[String], + includeCount: Boolean, + name: String, + rawChannelIds: List[String]): IO[Either[SlackError, Throwable], SlackUsergroup] = + client.createUsergroup( + UsergroupCreateParams.builder() + .setDescription(description) + .setHandle(handle) + .setIncludeCount(includeCount) + .setName(name) + .setRawChannelIds(rawChannelIds.asJava) + .build() + ).map(_.getUsergroup) + + + def listUsergroups(client: SlackClient, + includeCount: Boolean, + includeDisabled: Boolean, + includeUsers: Boolean): IO[Either[SlackError, Throwable], List[SlackUsergroup]] = + client.listUsergroups( + UsergroupListParams.builder() + .setIncludeCount(includeCount) + .setIncludeDisabled(includeDisabled) + .setIncludeUsers(includeUsers) + .build() + ) + + + def updateUsergroup(client: SlackClient, + description: Option[String], + handle: 
Option[String], + includeCount: Boolean, + name: Option[String], + rawChannelIds: List[String], + userGroupId: String): IO[Either[SlackError, Throwable], SlackUsergroup] = + client.updateUsergroup( + UsergroupUpdateParams.builder() + .setDescription(description) + .setHandle(handle) + .setIncludeCount(includeCount) + .setName(name) + .setRawChannelIds(rawChannelIds.asJava) + .setUsergroupId(userGroupId) + .build() + ).map(_.getUsergroup) + + + def enableUsergroup(client: SlackClient, + includeCount: Boolean, + userGroupId: String): IO[Either[SlackError, Throwable], Unit] = { + client.enableUsergroup( + UsergroupEnableParams.builder() + .setIncludeCount(includeCount) + .setUsergroupId(userGroupId) + .build() + ).unit + } + + + def disableUsergroup(client: SlackClient, + includeCount: Boolean, + userGroupId: String): IO[Either[SlackError, Throwable], Unit] = + client.disableUsergroup( + UsergroupDisableParams.builder() + .setIncludeCount(includeCount) + .setUsergroupId(userGroupId) + .build() + ).unit + + + def updateUsergroupUsers(client: SlackClient, + includeCount: Boolean, + rawUserIds: List[String], + userGroupId: String): IO[Either[SlackError, Throwable], SlackUsergroup] = + client.updateUsergroupUsers( + UsergroupUsersUpdateParams.builder() + .setIncludeCount(includeCount) + .setRawUserIds(rawUserIds.asJava) + .setUsergroupId(userGroupId) + .build() + ).map(_.getUsergroup) + + + def openDialog(client: SlackClient, + slackDialog: SlackDialog, + triggerId: String): IO[Either[SlackError, Throwable], Unit] = + client.openDialog( + DialogOpenParams.builder() + .setDialog(slackDialog) + .setTriggerId(triggerId) + .build() + ).unit + + + def addReaction(client: SlackClient, + channel: Option[String], + file: Option[String], + fileComment: Option[String], + name: String, + timestamp: Option[String]): IO[Either[SlackError, Throwable], Unit] = + client.addReaction( + ReactionsAddParams.builder() + .setChannel(channel) + .setFile(file) + .setFileComment(fileComment) + .setName(name) + .setTimestamp(timestamp) + .build() + ).unit + + + def getTeamInfo(client: SlackClient): IO[Either[SlackError, Throwable], SlackTeam] = + client.getTeamInfo.map(_.getSlackTeam) + + + def uploadFile(client: SlackClient, + channels: List[String], + content: Option[String], + filename: Option[String], + initialComment: Option[String], + threadTs: Option[String], + title: Option[String]): IO[Either[SlackError, Throwable], SlackFile] = + client.uploadFile( + FilesUploadParams.builder() + .setChannels(channels.asJava) + .setContent(content) + .setFilename(filename) + .setInitialComment(initialComment) + .setThreadTs(threadTs) + .setTitle(title) + .build() + ).map(_.getFile) + + + def shareFilePublically(client: SlackClient, + fileId: String): IO[Either[SlackError, Throwable], SlackFile] = + client.shareFilePublically( + FilesSharedPublicUrlParams.builder() + .setFileId(fileId) + .build() + ).map(_.getFile) + + + def listEmoji(client: SlackClient): IO[Either[SlackError, Throwable], Map[String, String]] = + client.listEmoji.map(_.getEmoji.asScala.toMap) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/slack/Slack.scala b/jvm/src/main/scala/com/harana/modules/slack/Slack.scala new file mode 100644 index 0000000..d4d46f1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/slack/Slack.scala @@ -0,0 +1,236 @@ +package com.harana.modules.slack + +import com.hubspot.slack.client.SlackClient +import com.hubspot.slack.client.methods.{ResultSort, ResultSortOrder} +import 
com.hubspot.slack.client.models.conversations.{Conversation, ConversationType} +import com.hubspot.slack.client.models.dialog.SlackDialog +import com.hubspot.slack.client.models.files.SlackFile +import com.hubspot.slack.client.models.group.SlackGroup +import com.hubspot.slack.client.models.response.auth.AuthTestResponse +import com.hubspot.slack.client.models.response.{MessagePage, SlackError} +import com.hubspot.slack.client.models.teams.SlackTeam +import com.hubspot.slack.client.models.usergroups.SlackUsergroup +import com.hubspot.slack.client.models.users.SlackUser +import com.hubspot.slack.client.models.{Attachment, LiteMessage, SlackChannel} +import zio.IO +import zio.macros.accessible + +@accessible +trait Slack { + def newClient(token: String): IO[Nothing, SlackClient] + + def Auth(client: SlackClient): IO[Either[SlackError, Throwable], AuthTestResponse] + + def revokeAuth(client: SlackClient): IO[Either[SlackError, Throwable], Boolean] + + def searchMessages(client: SlackClient, + count: Int, + page: Int, + query: String, + shouldHighlight: Boolean, + sort: ResultSort, + sortOrder: ResultSortOrder): IO[Either[SlackError, Throwable], MessagePage] + + def findReplies(client: SlackClient, + channelId: String, + threadTs: String): IO[Either[SlackError, Throwable], List[LiteMessage]] + + def findUser(client: SlackClient, + userId: String, + includeLocale: Boolean): IO[Either[SlackError, Throwable], SlackUser] + + def lookupUserByEmail(client: SlackClient, + email: String): IO[Either[SlackError, Throwable], SlackUser] + + def listUsers(client: SlackClient): IO[Either[SlackError, Throwable], List[SlackUser]] + + def listUsersPaginated(client: SlackClient, + cursor: Option[String], + limit: Option[Int]): IO[Either[SlackError, Throwable], List[SlackUser]] + + def listChannels(client: SlackClient, + cursor: Option[String], + limit: Option[Int], + shouldExcludeArchived: Boolean, + shouldExcludeMembers: Boolean): IO[Either[SlackError, Throwable], List[SlackChannel]] + + def channelHistory(client: SlackClient, + channelId: String, + count: Option[Int], + inclusive: Boolean): IO[Either[SlackError, Throwable], List[LiteMessage]] + + def getChannelByName(client: SlackClient, + channelName: String, + shouldExcludeArchived: Boolean, + shouldExcludeMembers: Boolean): IO[Either[SlackError, Throwable], SlackChannel] + + def getChannelInfo(client: SlackClient, + channelId: String, + includeLocale: Boolean): IO[Either[SlackError, Throwable], SlackChannel] + + def kickUserFromChannel(client: SlackClient, + channelId: String, + userId: String): IO[Either[SlackError, Throwable], Unit] + + def listGroups(client: SlackClient, + shouldExcludeArchived: Boolean, + shouldExcludeMembers: Boolean): IO[Either[SlackError, Throwable], List[SlackGroup]] + + def kickUserFromGroup(client: SlackClient, + channelId: String, + userId: String): IO[Either[SlackError, Throwable], Unit] + + def openIm(client: SlackClient, + includeLocale: Boolean, + returnIm: Boolean, + userId: String): IO[Either[SlackError, Throwable], Unit] + + def postMessage(client: SlackClient, + asUser: Boolean, + attachments: List[Attachment], + channelId: String, + iconEmoji: Option[String], + iconUrl: Option[String], + replyBroadcast: Boolean, + shouldLinkNames: Boolean, + text: Option[String], + threadTs: Option[String]): IO[Either[SlackError, Throwable], String] + + def postEphemeralMessage(client: SlackClient, + attachments: List[Attachment], + channelId: String, + parseMode: String, + sendAsUser: Boolean, + shouldLinkNames: Boolean, + text: 
Option[String], + threadTs: Option[String], + userToSendTo: String): IO[Either[SlackError, Throwable], String] + + def updateMessage(client: SlackClient, + asUser: Boolean, + attachments: List[Attachment], + channelId: String, + parse: String, + shouldLinkNames: Boolean, + text: Option[String], + ts: String): IO[Either[SlackError, Throwable], Unit] + + def getPermalink(client: SlackClient, + channelId: String, + messageTs: String): IO[Either[SlackError, Throwable], String] + + def deleteMessage(client: SlackClient, + asUser: Boolean, + channelId: String, + messageToDeleteTs: String): IO[Either[SlackError, Throwable], Unit] + + def listConversations(client: SlackClient, + conversationTypes: List[ConversationType], + cursor: Option[String], + limit: Option[Int], + shouldExcludeArchived: Boolean): IO[Either[SlackError, Throwable], List[Conversation]] + + def usersConversations(client: SlackClient, + cursor: Option[String], + limit: Option[Int], + shouldExcludeArchived: Boolean, + userId: Option[String]): IO[Either[SlackError, Throwable], List[Conversation]] + + def createConversation(client: SlackClient, + isPrivate: Boolean, + name: String): IO[Either[SlackError, Throwable], Unit] + + def inviteToConversation(client: SlackClient, + channelId: String, + users: List[String]): IO[Either[SlackError, Throwable], Unit] + + def unarchiveConversation(client: SlackClient, + channelId: String): IO[Either[SlackError, Throwable], Unit] + + def getConversationHistory(client: SlackClient, + channelId: String, + inclusive: Boolean, + limit: Option[Int], + newestTimestamp: Option[String], + oldestTimestamp: Option[String]): IO[Either[SlackError, Throwable], List[LiteMessage]] + + def archiveConversation(client: SlackClient, + channelId: String): IO[Either[SlackError, Throwable], Unit] + + def getConversationInfo(client: SlackClient, + conversationId: String, + includeLocale: Boolean): IO[Either[SlackError, Throwable], Conversation] + + def getConversationReplies(client: SlackClient, + channel: String, + ts: String): IO[Either[SlackError, Throwable], List[LiteMessage]] + + def getConversationByName(client: SlackClient, + conversationName: String, + conversationTypes: List[ConversationType], + shouldExcludeArchived: Boolean): IO[Either[SlackError, Throwable], Conversation] + + def openConversation(client: SlackClient, + channelId: Option[String], + returnIm: Boolean, + users: List[String]): IO[Either[SlackError, Throwable], Conversation] + + def createUsergroup(client: SlackClient, + description: Option[String], + handle: Option[String], + includeCount: Boolean, + name: String, + rawChannelIds: List[String]): IO[Either[SlackError, Throwable], SlackUsergroup] + + def listUsergroups(client: SlackClient, + includeCount: Boolean, + includeDisabled: Boolean, + includeUsers: Boolean): IO[Either[SlackError, Throwable], List[SlackUsergroup]] + + def updateUsergroup(client: SlackClient, + description: Option[String], + handle: Option[String], + includeCount: Boolean, + name: Option[String], + rawChannelIds: List[String], + userGroupId: String): IO[Either[SlackError, Throwable], SlackUsergroup] + + def enableUsergroup(client: SlackClient, + includeCount: Boolean, + userGroupId: String): IO[Either[SlackError, Throwable], Unit] + + def disableUsergroup(client: SlackClient, + includeCount: Boolean, + userGroupId: String): IO[Either[SlackError, Throwable], Unit] + + def updateUsergroupUsers(client: SlackClient, + includeCount: Boolean, + rawUserIds: List[String], + userGroupId: String): IO[Either[SlackError, Throwable], 
SlackUsergroup] + + def openDialog(client: SlackClient, + slackDialog: SlackDialog, + triggerId: String): IO[Either[SlackError, Throwable], Unit] + + def addReaction(client: SlackClient, + channel: Option[String], + file: Option[String], + fileComment: Option[String], + name: String, + timestamp: Option[String]): IO[Either[SlackError, Throwable], Unit] + + def getTeamInfo(client: SlackClient): IO[Either[SlackError, Throwable], SlackTeam] + + def uploadFile(client: SlackClient, + channels: List[String], + content: Option[String], + filename: Option[String], + initialComment: Option[String], + threadTs: Option[String], + title: Option[String]): IO[Either[SlackError, Throwable], SlackFile] + + def shareFilePublically(client: SlackClient, + fileId: String): IO[Either[SlackError, Throwable], SlackFile] + + def listEmoji(client: SlackClient): IO[Either[SlackError, Throwable], Map[String, String]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/slack/package.scala b/jvm/src/main/scala/com/harana/modules/slack/package.scala new file mode 100644 index 0000000..4ccc070 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/slack/package.scala @@ -0,0 +1,39 @@ +package com.harana.modules + +import com.hubspot.algebra.Result +import com.hubspot.slack.client.models.response.SlackError +import zio.{IO, ZIO} + +import java.util.Optional +import java.util.concurrent.CompletableFuture +import scala.compat.java8.FutureConverters._ +import scala.compat.java8.OptionConverters._ +import scala.concurrent.ExecutionContext.Implicits.global +import scala.jdk.CollectionConverters._ + +package object slack { + + implicit def toIO[SlackError, A](fn: CompletableFuture[Result[A, SlackError]]): IO[Either[SlackError, Throwable], A] = + ZIO.async { (cb: IO[Either[SlackError, Throwable], A] => Unit) => + fn.toScala.onComplete { f => + f.toEither match { + case Left(t) => cb(ZIO.fail(Right(t))) + case Right(x) => try { + if (x.isOk) cb(ZIO.succeed(x.unwrapOrElseThrow())) + else cb(ZIO.fail(Left(x.unwrapErrOrElseThrow()))) + } catch { + case e: Exception => cb(ZIO.fail(Right(e))) + } + } + } + } + + implicit def toIOIterable[A](fn: java.lang.Iterable[CompletableFuture[Result[java.util.List[A], SlackError]]]): IO[Either[SlackError, Throwable], List[A]] = + ZIO.foreach(fn.asScala.toList)(toIO).map(_.flatMap(_.asScala.toList)) + + implicit def toOptionalInt(opt: Option[Int]): Optional[Integer] = + opt.map { o => new Integer(o) }.asJava + + implicit def toOptionalDefault[A](opt: Option[A]): Optional[A] = + opt.asJava +} diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeAccounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeAccounts.scala new file mode 100644 index 0000000..01814f3 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeAccounts.scala @@ -0,0 +1,94 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.connect._ +import zio.{IO, ZIO, ZLayer} + +object LiveStripeAccounts { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeAccounts(config, logger, micrometer) + } +} + +case class LiveStripeAccounts(config: Config, logger: Logger, micrometer: Micrometer) extends StripeAccounts { + + private val client = config.secret("stripe-secret-key").map(key 
=> new Stripe(key).accounts) + + def create(country: Option[String] = None, + email: Option[String] = None, + custom: Boolean = false, + accountToken: Option[String] = None, + businessLogo: Option[String] = None, + businessName: Option[String] = None, + businessPrimaryColor: Option[String] = None, + businessURL: Option[String] = None, + legalEntity: Option[LegalEntity] = None, + tosAcceptance: Option[Acceptance] = None): IO[ResponseError, Account] = + for { + c <- client + r <- execute(c.create(country, email, custom, accountToken, businessLogo, businessName, businessPrimaryColor, businessURL, legalEntity, tosAcceptance)) + } yield r + + + def byId(accountId: String): IO[ResponseError, Account] = + for { + c <- client + r <- execute(c.byId(accountId)) + } yield r + + + def update(accountId: String, + businessLogo: Option[String] = None, + businessName: Option[String] = None, + businessPrimaryColor: Option[String] = None, + businessUrl: Option[String] = None, + debitNegativeBalances: Option[Boolean] = None, + declineChargeOn: Option[DeclineChargeOn] = None, + defaultCurrency: Option[String] = None, + email: Option[String] = None, + externalAccount: Option[String] = None, + legalEntity: Option[LegalEntity] = None, + metadata: Map[String, String] = Map.empty, + productDescription: Option[String] = None, + statementDescriptor: Option[String] = None, + supportEmail: Option[String] = None, + supportPhone: Option[String] = None, + supportUrl: Option[String] = None, + tosAcceptance: Option[Acceptance] = None, + transferSchedule: Option[TransferSchedule] = None, + transferStatementDescriptor: Option[String] = None): IO[ResponseError, Account] = + for { + c <- client + r <- execute(c.update(accountId, businessLogo, businessName, businessPrimaryColor, businessUrl, debitNegativeBalances, declineChargeOn, + defaultCurrency, email, externalAccount, legalEntity, metadata, productDescription, statementDescriptor, supportEmail, supportPhone, supportUrl, + tosAcceptance, transferSchedule, transferStatementDescriptor)) + } yield r + + + def delete(accountId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(accountId)) + } yield r + + + def reject(accountId: String, reason: String): IO[ResponseError, Account] = + for { + c <- client + r <- execute(c.reject(accountId, reason)) + } yield r + + + def list(config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Account]] = + for { + c <- client + r <- execute(c.list(config)) + } yield r +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeApplicationFeeRefunds.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeApplicationFeeRefunds.scala new file mode 100644 index 0000000..d3b69db --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeApplicationFeeRefunds.scala @@ -0,0 +1,52 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.connect.FeeRefund +import zio.{IO, ZIO, ZLayer} + +object LiveStripeApplicationFeeRefunds { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeApplicationFeeRefunds(config, logger, micrometer) + } +} + +case class LiveStripeApplicationFeeRefunds(config: Config, logger: Logger, micrometer: Micrometer) extends 
StripeApplicationFeeRefunds { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).applicationFees.refunds) + + def create(feeId: String, + amount: Option[Money] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, FeeRefund] = + for { + c <- client + r <- execute(c.create(feeId, amount, metadata)) + } yield r + + + def byId(feeId: String, refundId: String): IO[ResponseError, FeeRefund] = + for { + c <- client + r <- execute(c.byId(feeId, refundId)) + } yield r + + + def update(feeId: String, refundId: String, metadata: Map[String, String] = Map.empty): IO[ResponseError, FeeRefund] = + for { + c <- client + r <- execute(c.update(feeId, refundId, metadata)) + } yield r + + + def list(feeId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[FeeRefund]] = + for { + c <- client + r <- execute(c.list(feeId, config)) + } yield r +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeApplicationFees.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeApplicationFees.scala new file mode 100644 index 0000000..d1503e0 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeApplicationFees.scala @@ -0,0 +1,39 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.connect.ApplicationFee +import zio.{IO, ZIO, ZLayer} + +object LiveStripeApplicationFees { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeApplicationFees(config, logger, micrometer) + } +} + +case class LiveStripeApplicationFees(config: Config, logger: Logger, micrometer: Micrometer) extends StripeApplicationFees { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).applicationFees) + + def byId(feeId: String): IO[ResponseError, ApplicationFee] = + for { + c <- client + r <- execute(c.byId(feeId)) + } yield r + + + def list(charge: Option[String] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[ApplicationFee]] = + for { + c <- client + r <- execute(c.list(charge, created, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeBalance.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeBalance.scala new file mode 100644 index 0000000..5757987 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeBalance.scala @@ -0,0 +1,50 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.balance.{Balance, BalanceTransaction} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeBalance { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeBalance(config, logger, micrometer) + } +} + +case class LiveStripeBalance(config: Config, logger: Logger, micrometer: Micrometer) extends StripeBalance { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).balance) + + def get: IO[ResponseError, Balance] = + for 
{ + c <- client + r <- execute(c.apply()) + } yield r + + + def byId(id: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, BalanceTransaction] = + for { + c <- client + r <- execute(c.byId(id, config)) + } yield r + + + def list(availableOn: Option[TimestampFilter] = None, + created: Option[TimestampFilter] = None, + currency: Option[String] = None, + source: Option[String] = None, + transfer: Option[String] = None, + `type`: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[BalanceTransaction]] = + for { + c <- client + r <- execute(c.list(availableOn, created, currency, source, transfer, `type`, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCharges.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCharges.scala new file mode 100644 index 0000000..839004c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCharges.scala @@ -0,0 +1,84 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.charge.{Charge, FraudDetails, Shipping} +import zio.{IO, ZIO, ZLayer} + +import scala.language.implicitConversions + +object LiveStripeCharges { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeCharges(config, logger, micrometer) + } +} + +case class LiveStripeCharges(config: Config, logger: Logger, micrometer: Micrometer) extends StripeCharges { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).charges) + + def create(amount: Money, + currency: String, + applicationFee: Option[Money] = None, + capture: Boolean = true, + description: Option[String] = None, + destination: Option[String] = None, + metadata: Map[String, String] = Map.empty, + receiptEmail: Option[String] = None, + shipping: Option[Shipping] = None, + customer: Option[String] = None, + source: Option[String] = None, + statementDescriptor: Option[String] = None): IO[ResponseError, Charge] = + for { + c <- client + r <- execute(c.create(amount, currency, applicationFee, capture, description, destination, metadata, receiptEmail, shipping, + customer, source, statementDescriptor)) + } yield r + + + def byId(chargeId: String): IO[ResponseError, Charge] = + for { + c <- client + r <- execute(c.byId(chargeId)) + } yield r + + + def update(chargeId: String, + description: Option[String] = None, + fraudDetails: Option[FraudDetails] = None, + metadata: Map[String, String] = Map.empty, + receiptEmail: Option[String] = None, + shipping: Option[Shipping] = None): IO[ResponseError, Charge] = + for { + c <- client + r <- execute(c.update(chargeId, description, fraudDetails, metadata, receiptEmail, shipping)) + } yield r + + + def capture(chargeId: String, + amount: Option[Money] = None, + applicationFee: Option[Money] = None, + receiptEmail: Option[String] = None, + statementDescriptor: Option[String] = None): IO[ResponseError, Charge] = + for { + c <- client + r <- execute(c.capture(chargeId, amount, applicationFee, receiptEmail, statementDescriptor)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + customer: Option[String] = None, + source: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, 
StripeList[Charge]] = + for { + c <- client + r <- execute(c.list(created, customer, source, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCountrySpecs.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCountrySpecs.scala new file mode 100644 index 0000000..cde1388 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCountrySpecs.scala @@ -0,0 +1,37 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe.connect.CountrySpec +import com.outr.stripe.{QueryConfig, ResponseError, Stripe, StripeList} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeCountrySpecs { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeCountrySpecs(config, logger, micrometer) + } +} + +case class LiveStripeCountrySpecs(config: Config, logger: Logger, micrometer: Micrometer) extends StripeCountrySpecs { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).countrySpecs) + + def list(config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[CountrySpec]] = + for { + c <- client + r <- execute(c.list(config)) + } yield r + + + def byId(countryCode: String): IO[ResponseError, CountrySpec] = + for { + c <- client + r <- execute(c.byId(countryCode)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCoupons.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCoupons.scala new file mode 100644 index 0000000..8a79579 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCoupons.scala @@ -0,0 +1,67 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.subscription.Coupon +import zio.{IO, ZIO, ZLayer} + +object LiveStripeCoupons { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeCoupons(config, logger, micrometer) + } +} + +case class LiveStripeCoupons(config: Config, logger: Logger, micrometer: Micrometer) extends StripeCoupons { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).coupons) + + def create(couponId: String, + duration: String, + amountOff: Option[Money] = None, + currency: Option[String] = None, + durationInMonths: Option[Int] = None, + maxRedemptions: Option[Int] = None, + metadata: Map[String, String] = Map.empty, + percentOff: Option[Int] = None, + redeemBy: Option[Long] = None): IO[ResponseError, Coupon] = + for { + c <- client + r <- execute(c.create(couponId, duration, amountOff, currency, durationInMonths, maxRedemptions, metadata, percentOff, redeemBy)) + } yield r + + + def byId(couponId: String): IO[ResponseError, Coupon] = + for { + c <- client + r <- execute(c.byId(couponId)) + } yield r + + + def update(couponId: String, metadata: Map[String, String]): IO[ResponseError, Coupon] = + for { + c <- client + r <- execute(c.update(couponId, metadata)) + } yield r + + + def delete(couponId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(couponId)) + } 
yield r + + + def list(created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Coupon]] = + for { + c <- client + r <- execute(c.list(created, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomerBankAccounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomerBankAccounts.scala new file mode 100644 index 0000000..9a4fe45 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomerBankAccounts.scala @@ -0,0 +1,76 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.charge.BankAccount +import zio.{IO, ZIO, ZLayer} + +object LiveStripeCustomerBankAccounts { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeCustomerBankAccounts(config, logger, micrometer) + } +} + +case class LiveStripeCustomerBankAccounts(config: Config, logger: Logger, micrometer: Micrometer) extends StripeCustomerBankAccounts { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).customers.sources.bankAccounts) + + def create(customerId: String, + source: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.create(customerId, source, defaultForCurrency, metadata)) + } yield r + + + def byId(customerId: String, bankAccountId: String): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.byId(customerId, bankAccountId)) + } yield r + + + def update(customerId: String, + bankAccountId: String, + accountHolderName: Option[String] = None, + accountHolderType: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.update(customerId, bankAccountId, accountHolderName, accountHolderType, metadata)) + } yield r + + + def verify(customerId: String, + bankAccountId: String, + amount1: Option[Money] = None, + amount2: Option[Money] = None, + verificationMethod: Option[String] = None): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.verify(customerId, bankAccountId, amount1, amount2, verificationMethod)) + } yield r + + + def delete(customerId: String, bankAccountId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(customerId, bankAccountId)) + } yield r + + + def list(customerId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[BankAccount]] = + for { + c <- client + r <- execute(c.list(customerId, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomerCreditCards.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomerCreditCards.scala new file mode 100644 index 0000000..12332c7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomerCreditCards.scala @@ -0,0 +1,73 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import 
com.outr.stripe.charge.Card +import zio.{IO, ZIO, ZLayer} + +object LiveStripeCustomerCreditCards { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeCustomerCreditCards(config, logger, micrometer) + } +} + +case class LiveStripeCustomerCreditCards(config: Config, logger: Logger, micrometer: Micrometer) extends StripeCustomerCreditCards { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).customers.sources.cards) + + def create(customerId: String, + source: Option[String] = None, + externalAccount: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, Card] = + for { + c <- client + r <- execute(c.create(customerId, source, externalAccount, defaultForCurrency, metadata)) + } yield r + + + def byId(customerId: String, cardId: String): IO[ResponseError, Card] = + for { + c <- client + r <- execute(c.byId(customerId, cardId)) + } yield r + + + def update(customerId: String, + cardId: String, + addressCity: Option[String] = None, + addressCountry: Option[String] = None, + addressLine1: Option[String] = None, + addressLine2: Option[String] = None, + addressState: Option[String] = None, + addressZip: Option[String] = None, + defaultForCurrency: Option[String] = None, + expMonth: Option[Int] = None, + expYear: Option[Int] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None): IO[ResponseError, Card] = + for { + c <- client + r <- execute(c.update(customerId, cardId, addressCity, addressCountry, addressLine1, addressLine2, addressState, addressZip, defaultForCurrency, + expMonth, expYear, metadata, name)) + } yield r + + def delete(customerId: String, cardId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(customerId, cardId)) + } yield r + + def list(customerId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Card]] = + for { + c <- client + r <- execute(c.list(customerId, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomers.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomers.scala new file mode 100644 index 0000000..2905ac3 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomers.scala @@ -0,0 +1,90 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.charge.{Address, Card, Shipping} +import com.outr.stripe.customer.Customer +import zio.{IO, ZIO, ZLayer} + +object LiveStripeCustomers { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeCustomers(config, logger, micrometer) + } +} + +case class LiveStripeCustomers(config: Config, logger: Logger, micrometer: Micrometer) extends StripeCustomers { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).customers) + + def create(address: Option[Address] = None, + balance: Option[Money] = None, + coupon: Option[String] = None, + description: Option[String] = None, + email: Option[String] = None, + invoicePrefix: Option[String] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] 
= None, + nextInvoiceSequence: Option[Int] = None, + paymentMethodId: Option[String] = None, + phone: Option[String] = None, + promotionCode: Option[String] = None, + shipping: Option[Shipping] = None, + source: Option[Card] = None, + taxExempt: Option[String] = None): IO[ResponseError, Customer] = + for { + c <- client + r <- execute(c.create(address, balance, coupon, description, email, invoicePrefix, metadata, name, nextInvoiceSequence, paymentMethodId, phone, promotionCode, shipping, source, taxExempt)) + } yield r + + + def byId(customerId: String): IO[ResponseError, Customer] = + for { + c <- client + r <- execute(c.byId(customerId)) + } yield r + + + def update(customerId: String, + address: Option[Address] = None, + balance: Option[Money] = None, + coupon: Option[String] = None, + defaultSource: Option[String] = None, + description: Option[String] = None, + email: Option[String] = None, + invoicePrefix: Option[String] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + nextInvoiceSequence: Option[Int] = None, + phone: Option[String] = None, + promotionCode: Option[String] = None, + shipping: Option[Shipping] = None, + source: Option[Card] = None, + taxExempt: Option[String] = None): IO[ResponseError, Customer] = + for { + c <- client + r <- execute(c.update(customerId, address, balance, coupon, defaultSource, description, email, invoicePrefix, metadata, name, nextInvoiceSequence, phone, promotionCode, shipping, source, taxExempt)) + } yield r + + + def delete(customerId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(customerId)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default, + email: Option[String] = None): IO[ResponseError, StripeList[Customer]] = + for { + c <- client + r <- execute(c.list(created, config, email)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeDiscounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeDiscounts.scala new file mode 100644 index 0000000..63901c7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeDiscounts.scala @@ -0,0 +1,36 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe.{Deleted, ResponseError, Stripe} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeDiscounts { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeDiscounts(config, logger, micrometer) + } +} + +case class LiveStripeDiscounts(config: Config, logger: Logger, micrometer: Micrometer) extends StripeDiscounts { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).discounts) + + def deleteCustomerDiscount(customerId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.deleteCustomerDiscount(customerId)) + } yield r + + + def deleteSubscriptionDiscount(subscriptionId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.deleteSubscriptionDiscount(subscriptionId)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeDisputes.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeDisputes.scala new file mode 100644 index 0000000..8195a5a --- 
/dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeDisputes.scala @@ -0,0 +1,54 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.dispute.{Dispute, DisputeEvidence} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeDisputes { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeDisputes(config, logger, micrometer) + } +} + +case class LiveStripeDisputes(config: Config, logger: Logger, micrometer: Micrometer) extends StripeDisputes { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).disputes) + + def byId(disputeId: String): IO[ResponseError, Dispute] = + for { + c <- client + r <- execute(c.byId(disputeId)) + } yield r + + + def update(disputeId: String, + evidence: Option[DisputeEvidence] = None, + metadata: Map[String, String]): IO[ResponseError, Dispute] = + for { + c <- client + r <- execute(c.update(disputeId, evidence, metadata)) + } yield r + + + def close(disputeId: String): IO[ResponseError, Dispute] = + for { + c <- client + r <- execute(c.close(disputeId)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Dispute]] = + for { + c <- client + r <- execute(c.list(created, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeEvents.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeEvents.scala new file mode 100644 index 0000000..fbe6f91 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeEvents.scala @@ -0,0 +1,40 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.event.Event +import zio.{IO, ZIO, ZLayer} + +object LiveStripeEvents { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeEvents(config, logger, micrometer) + } +} + +case class LiveStripeEvents(config: Config, logger: Logger, micrometer: Micrometer) extends StripeEvents { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).events) + + def byId(eventId: String): IO[ResponseError, Event] = + for { + c <- client + r <- execute(c.byId(eventId)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + `type`: Option[String] = None, + types: List[String] = Nil, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Event]] = + for { + c <- client + r <- execute(c.list(created, `type`, types, config)) + } yield r + +} diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeExternalBankAccounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeExternalBankAccounts.scala new file mode 100644 index 0000000..68188e5 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeExternalBankAccounts.scala @@ -0,0 +1,65 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import 
com.outr.stripe._ +import com.outr.stripe.charge.BankAccount +import zio.{IO, ZIO, ZLayer} + +object LiveStripeExternalBankAccounts { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeExternalBankAccounts(config, logger, micrometer) + } +} + +case class LiveStripeExternalBankAccounts(config: Config, logger: Logger, micrometer: Micrometer) extends StripeExternalBankAccounts { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).accounts.external.bankAccounts) + + def create(accountId: String, + source: Option[String] = None, + externalAccount: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.create(accountId, source, externalAccount, defaultForCurrency ,metadata)) + } yield r + + + def byId(accountId: String, bankAccountId: String): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.byId(accountId, bankAccountId)) + } yield r + + + def update(accountId: String, + bankAccountId: String, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.update(accountId, bankAccountId, defaultForCurrency, metadata)) + } yield r + + + def delete(accountId: String, bankAccountId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(accountId, bankAccountId)) + } yield r + + + def list(accountId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[BankAccount]] = + for { + c <- client + r <- execute(c.list(accountId, config)) + } yield r + +} diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeExternalCreditCards.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeExternalCreditCards.scala new file mode 100644 index 0000000..cba1383 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeExternalCreditCards.scala @@ -0,0 +1,75 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.charge.Card +import zio.{IO, ZIO, ZLayer} + +object LiveStripeExternalCreditCards { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeExternalCreditCards(config, logger, micrometer) + } +} + +case class LiveStripeExternalCreditCards(config: Config, logger: Logger, micrometer: Micrometer) extends StripeExternalCreditCards { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).accounts.external.cards) + + def create(accountId: String, + source: Option[String] = None, + externalAccount: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, Card] = + for { + c <- client + r <- execute(c.create(accountId, source, externalAccount, defaultForCurrency, metadata)) + } yield r + + + def byId(accountId: String, cardId: String): IO[ResponseError, Card] = + for { + c <- client + r <- execute(c.byId(accountId, cardId)) + } yield r + + + def update(accountId: String, + cardId: String, + addressCity: Option[String] = None, + addressCountry: Option[String] 
= None, + addressLine1: Option[String] = None, + addressLine2: Option[String] = None, + addressState: Option[String] = None, + addressZip: Option[String] = None, + defaultForCurrency: Option[String] = None, + expMonth: Option[Int] = None, + expYear: Option[Int] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None): IO[ResponseError, Card] = + for { + c <- client + r <- execute(c.update(accountId, cardId, addressCity, addressCountry, addressLine1, addressLine2, addressState, addressZip, + defaultForCurrency, expMonth, expYear, metadata, name)) + } yield r + + + def delete(accountId: String, cardId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(accountId, cardId)) + } yield r + + + def list(accountId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Card]] = + for { + c <- client + r <- execute(c.list(accountId, config)) + } yield r + +} diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeInvoiceItems.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeInvoiceItems.scala new file mode 100644 index 0000000..cb0df24 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeInvoiceItems.scala @@ -0,0 +1,71 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.subscription.InvoiceItem +import zio.{IO, ZIO, ZLayer} + +object LiveStripeInvoiceItems { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeInvoiceItems(config, logger, micrometer) + } +} + +case class LiveStripeInvoiceItems(config: Config, logger: Logger, micrometer: Micrometer) extends StripeInvoiceItems { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).invoiceItems) + + def create(amount: Money, + currency: String, + customerId: String, + description: Option[String] = None, + discountable: Option[Boolean] = None, + invoice: Option[String] = None, + metadata: Map[String, String] = Map.empty, + subscription: Option[String] = None): IO[ResponseError, InvoiceItem] = + for { + c <- client + r <- execute(c.create(amount, currency, customerId, description, discountable, invoice, metadata, subscription)) + } yield r + + + def byId(invoiceItemId: String): IO[ResponseError, InvoiceItem] = + for { + c <- client + r <- execute(c.byId(invoiceItemId)) + } yield r + + + def update(invoiceItemId: String, + amount: Option[Money] = None, + description: Option[String] = None, + discountable: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, InvoiceItem] = + for { + c <- client + r <- execute(c.update(invoiceItemId, amount, description, discountable, metadata)) + } yield r + + + def delete(invoiceItemId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(invoiceItemId)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + customer: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[InvoiceItem]] = + for { + c <- client + r <- execute(c.list(created, customer, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeInvoices.scala 
b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeInvoices.scala new file mode 100644 index 0000000..f643ca1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeInvoices.scala @@ -0,0 +1,104 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.subscription.{Invoice, InvoiceLine} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeInvoices { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeInvoices(config, logger, micrometer) + } +} + +case class LiveStripeInvoices(config: Config, logger: Logger, micrometer: Micrometer) extends StripeInvoices { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).invoices) + + def create(customerId: String, + applicationFee: Option[Money] = None, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty, + statementDescriptor: Option[String] = None, + subscription: Option[String] = None, + taxPercent: Option[BigDecimal] = None): IO[ResponseError, Invoice] = + for { + c <- client + r <- execute(c.create(customerId, applicationFee, description, metadata, statementDescriptor, subscription, taxPercent)) + } yield r + + + def byId(invoiceId: String): IO[ResponseError, Invoice] = + for { + c <- client + r <- execute(c.byId(invoiceId)) + } yield r + + + def linesById(invoiceId: String, + coupon: Option[String] = None, + customer: Option[String] = None, + subscription: Option[String] = None, + subscriptionPlan: Option[String] = None, + subscriptionProrate: Option[String] = None, + subscriptionProrationDate: Option[Long] = None, + subscriptionQuantity: Option[Int] = None, + subscriptionTrialEnd: Option[Long] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[InvoiceLine]] = + for { + c <- client + r <- execute(c.linesById(invoiceId, coupon, customer, subscription, subscriptionPlan, subscriptionProrate, subscriptionProrationDate, subscriptionQuantity, + subscriptionTrialEnd, config)) + } yield r + + + def upcoming(customerId: String, + coupon: Option[String] = None, + subscription: Option[String] = None, + subscriptionPlan: Option[String] = None, + subscriptionProrate: Option[String] = None, + subscriptionProrationDate: Option[Long] = None, + subscriptionQuantity: Option[Int] = None, + subscriptionTrialEnd: Option[Long] = None): IO[ResponseError, Invoice] = + for { + c <- client + r <- execute(c.upcoming(customerId, coupon, subscription, subscriptionPlan, subscriptionProrate, subscriptionProrationDate, subscriptionQuantity, subscriptionTrialEnd)) + } yield r + + + def update(invoiceId: String, + applicationFee: Option[Money] = None, + closed: Option[Boolean] = None, + description: Option[String] = None, + forgiven: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty, + statementDescriptor: Option[String] = None, + taxPercent: Option[BigDecimal] = None): IO[ResponseError, Invoice] = + for { + c <- client + r <- execute(c.update(invoiceId, applicationFee, closed, description, forgiven, metadata, statementDescriptor, taxPercent)) + } yield r + + + def pay(invoiceId: String): IO[ResponseError, Invoice] = + for { + c <- client + r <- execute(c.pay(invoiceId)) + } yield r + + + def list(customerId: Option[String] = None, + date: Option[TimestampFilter] = 
None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Invoice]] = + for { + c <- client + r <- execute(c.list(customerId, date, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripePlans.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripePlans.scala new file mode 100644 index 0000000..34edf8e --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripePlans.scala @@ -0,0 +1,73 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.subscription.Plan +import zio.{IO, ZIO, ZLayer} + +object LiveStripePlans { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripePlans(config, logger, micrometer) + } +} + +case class LiveStripePlans(config: Config, logger: Logger, micrometer: Micrometer) extends StripePlans { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).plans) + + def create(planId: String, + amount: Money, + currency: String, + interval: String, + productId: String, + intervalCount: Int = 1, + metadata: Map[String, String] = Map.empty, + nickname: Option[String], + trialPeriodDays: Option[Int] = None): IO[ResponseError, Plan] = + for { + c <- client + r <- execute(c.create(planId, amount, currency, interval, productId, intervalCount, metadata, nickname, trialPeriodDays)) + } yield r + + + def byId(planId: String): IO[ResponseError, Plan] = + for { + c <- client + r <- execute(c.byId(planId)) + } yield r + + + def update(planId: String, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + productId: Option[String] = None, + statementDescriptor: Option[String] = None, + trialPeriodDays: Option[Int] = None): IO[ResponseError, Plan] = + for { + c <- client + r <- execute(c.update(planId, metadata, name, productId, statementDescriptor, trialPeriodDays)) + } yield r + + + def delete(planId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(planId)) + } yield r + + + def list(active: Option[Boolean] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Plan]] = + for { + c <- client + r <- execute(c.list(active, created, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripePrices.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripePrices.scala new file mode 100644 index 0000000..509bf45 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripePrices.scala @@ -0,0 +1,82 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.price.{Price, Recurring, Tier, TransformQuantity} +import zio.{IO, ZIO, ZLayer} + +object LiveStripePrices { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripePrices(config, logger, micrometer) + } +} + +case class LiveStripePrices(config: Config, logger: Logger, micrometer: Micrometer) extends StripePrices { + + private val client = 
config.secret("stripe-secret-key").map(key => new Stripe(key).prices) + + def create(currency: String, + active: Option[Boolean] = None, + billingScheme: Option[String] = None, + lookupKey: Option[String] = None, + metadata: Map[String, String] = Map.empty, + nickname: Option[String] = None, + recurring: Option[Recurring] = None, + tiers: List[Tier] = List(), + tiersMode: Option[String] = None, + transferLookupKey: Option[Boolean] = None, + transformQuantity: Option[TransformQuantity] = None, + unitAmount: Option[Int] = None, + unitAmountDecimal: Option[BigDecimal] = None): IO[ResponseError, Price] = + for { + c <- client + r <- execute(c.create(currency, active, billingScheme, lookupKey, metadata, nickname, recurring, tiers, tiersMode, transferLookupKey, transformQuantity, unitAmount, unitAmountDecimal)) + } yield r + + + def byId(priceId: String): IO[ResponseError, Price] = + for { + c <- client + r <- execute(c.byId(priceId)) + } yield r + + + def update(priceId: String, + active: Option[Boolean] = None, + lookupKey: Option[String] = None, + metadata: Map[String, String] = Map.empty, + nickname: Option[String] = None, + transferLookupKey: Option[Boolean] = None): IO[ResponseError, Price] = + for { + c <- client + r <- execute(c.update(priceId, active, lookupKey, metadata, nickname, transferLookupKey)) + } yield r + + + def delete(priceId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(priceId)) + } yield r + + + def list(active: Option[Boolean] = None, + currency: Option[String] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default, + endingBefore: Option[String] = None, + limit: Option[Int] = None, + productId: Option[String] = None, + `type`: Option[String] = None): IO[ResponseError, StripeList[Price]] = + for { + c <- client + r <- execute(c.list(active, currency, created, config, endingBefore, limit, productId, `type`)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeProducts.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeProducts.scala new file mode 100644 index 0000000..bfd8064 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeProducts.scala @@ -0,0 +1,94 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.product.{PackageDimensions, Product => StripeProduct} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeProducts { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeProducts(config, logger, micrometer) + } +} + +case class LiveStripeProducts(config: Config, logger: Logger, micrometer: Micrometer) extends StripeProducts { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).products) + + def create(name: String, + active: Option[Boolean] = None, + attributes: List[String] = List.empty, + caption: Option[String] = None, + deactivateOn: List[String] = List.empty, + description: Option[String] = None, + images: List[String] = List.empty, + liveMode: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty, + packageDimensions: Option[PackageDimensions] = None, + productId: Option[String] = None, + shippable: Option[Boolean] = None, + statementDescriptor: Option[String] = 
None, + `type`: Option[String] = None, + unitLabel: Option[String] = None, + url: Option[String] = None): IO[ResponseError, StripeProduct] = + for { + c <- client + r <- execute(c.create(name, active, attributes, caption, deactivateOn, description, images, liveMode, metadata, packageDimensions, productId, shippable, statementDescriptor, `type`, unitLabel, url)) + } yield r + + + def byId(productId: String): IO[ResponseError, StripeProduct] = + for { + c <- client + r <- execute(c.byId(productId)) + } yield r + + + def update(productId: String, + active: Option[Boolean] = None, + attributes: List[String] = List.empty, + caption: Option[String] = None, + deactivateOn: List[String] = List.empty, + description: Option[String] = None, + images: List[String] = List.empty, + liveMode: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + packageDimensions: Option[PackageDimensions] = None, + shippable: Option[Boolean] = None, + statementDescriptor: Option[String] = None, + `type`: Option[String] = None, + unitLabel: Option[String] = None, + url: Option[String] = None): IO[ResponseError, StripeProduct] = + for { + c <- client + r <- execute(c.update(productId, active, attributes, caption, deactivateOn, description, images, liveMode, metadata, name, packageDimensions, shippable, statementDescriptor, `type`, unitLabel, url)) + } yield r + + + def delete(productId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(productId)) + } yield r + + + def list(active: Option[Boolean] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default, + ids: List[String] = Nil, + shippable: Option[Boolean] = None, + `type`: Option[String] = None, + url: Option[String] = None): IO[ResponseError, StripeList[StripeProduct]] = + for { + c <- client + r <- execute(c.list(active, created, config, ids, shippable, `type`, url)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeRefunds.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeRefunds.scala new file mode 100644 index 0000000..ea109f6 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeRefunds.scala @@ -0,0 +1,57 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.refund.Refund +import zio.{IO, ZIO, ZLayer} + +object LiveStripeRefunds { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeRefunds(config, logger, micrometer) + } +} + +case class LiveStripeRefunds(config: Config, logger: Logger, micrometer: Micrometer) extends StripeRefunds { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).refunds) + + def create(chargeId: String, + amount: Option[Money] = None, + metadata: Map[String, String] = Map.empty, + reason: Option[String] = None, + refundApplicationFee: Boolean = false, + reverseTransfer: Boolean = false): IO[ResponseError, Refund] = + for { + c <- client + r <- execute(c.create(chargeId, amount, metadata, reason, refundApplicationFee, reverseTransfer)) + } yield r + + + def byId(refundId: String): IO[ResponseError, Refund] = + for { + c <- client + r <- execute(c.byId(refundId)) + } yield r + + + def update(refundId: String, 
metadata: Map[String, String] = Map.empty): IO[ResponseError, Refund] = + for { + c <- client + r <- execute(c.update(refundId, metadata)) + } yield r + + + def list(chargeId: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Refund]] = + for { + c <- client + r <- execute(c.list(chargeId, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeSubscriptionItems.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeSubscriptionItems.scala new file mode 100644 index 0000000..3c379fa --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeSubscriptionItems.scala @@ -0,0 +1,76 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.subscription.SubscriptionItem +import zio.{IO, ZIO, ZLayer} + +object LiveStripeSubscriptionItems { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeSubscriptionItems(config, logger, micrometer) + } +} + +case class LiveStripeSubscriptionItems(config: Config, logger: Logger, micrometer: Micrometer) extends StripeSubscriptionItems { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).subscriptionItems) + + def create(subscriptionId: String, + billingThresholds: Map[String, String] = Map(), + metadata: Map[String, String] = Map(), + paymentBehavior: Option[String] = None, + priceId: Option[String] = None, + prorationBehavior: Option[String] = None, + prorationDate: Option[Long] = None, + quantity: Option[Int] = None, + taxRates: List[String] = List()): IO[ResponseError, SubscriptionItem] = + for { + c <- client + r <- execute(c.create(subscriptionId, billingThresholds, metadata, paymentBehavior, priceId, prorationBehavior, prorationDate, quantity, taxRates)) + } yield r + + + def byId(subscriptionItemId: String): IO[ResponseError, SubscriptionItem] = + for { + c <- client + r <- execute(c.byId(subscriptionItemId)) + } yield r + + + def update(subscriptionItemId: String, + billingThresholds: Map[String, String] = Map(), + metadata: Map[String, String] = Map(), + offSession: Option[Boolean] = None, + paymentBehavior: Option[String] = None, + priceId: Option[String] = None, + prorationBehavior: Option[String] = None, + prorationDate: Option[Long] = None, + quantity: Option[Int] = None, + taxRates: List[String] = List()): IO[ResponseError, SubscriptionItem] = + for { + c <- client + r <- execute(c.update(subscriptionItemId, billingThresholds, metadata, offSession, paymentBehavior, priceId, prorationBehavior, prorationDate, quantity, taxRates)) + } yield r + + + def delete(subscriptionItemId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(subscriptionItemId)) + } yield r + + + def list(subscription: String, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[SubscriptionItem]] = + for { + c <- client + r <- execute(c.list(subscription, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeSubscriptions.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeSubscriptions.scala new file mode 100644 index 0000000..e27dca7 --- /dev/null +++ 
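
All of these methods surface failures as `com.outr.stripe.ResponseError` in the error channel, so callers can retry or recover with ordinary ZIO combinators. A hedged usage sketch against the refunds service shown above; the charge id, retry policy and logging fallback are illustrative choices, not something this module prescribes:

```
object RefundExample {
  import zio._
  import com.outr.stripe.refund.Refund
  import com.harana.modules.stripe.StripeRefunds

  // Retry a refund a few times with backoff, then fall back to logging the error.
  val refundWithRetry: ZIO[StripeRefunds, Nothing, Option[Refund]] =
    ZIO.serviceWithZIO[StripeRefunds](_.create("ch_123", reason = Some("requested_by_customer")))
      .retry(Schedule.recurs(3) && Schedule.exponential(200.millis))
      .map(Some(_))
      .catchAll(err => ZIO.logWarning(s"Refund failed: $err").as(None))
}
```
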
b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeSubscriptions.scala @@ -0,0 +1,85 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.subscription.{CreateSubscriptionItem, Subscription} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeSubscriptions { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeSubscriptions(config, logger, micrometer) + } +} + +case class LiveStripeSubscriptions(config: Config, logger: Logger, micrometer: Micrometer) extends StripeSubscriptions { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).subscriptions) + + def create(customerId: String, + items: List[CreateSubscriptionItem], + applicationFeePercent: Option[BigDecimal] = None, + coupon: Option[String] = None, + metadata: Map[String, String] = Map.empty, + prorate: Option[Boolean] = None, + quantity: Option[Int] = None, + source: Option[String] = None, + taxPercent: Option[BigDecimal] = None, + trialEnd: Option[Long] = None, + trialPeriodDays: Option[Int] = None): IO[ResponseError, Subscription] = + for { + c <- client + r <- execute(c.create(customerId, items, applicationFeePercent, coupon, metadata, prorate, quantity, source, taxPercent, trialEnd, trialPeriodDays)) + } yield r + + + def byId(subscriptionId: String): IO[ResponseError, Subscription] = + for { + c <- client + r <- execute(c.byId(subscriptionId)) + } yield r + + + def update(subscriptionId: String, + items: List[CreateSubscriptionItem] = List(), + applicationFeePercent: Option[BigDecimal] = None, + coupon: Option[String] = None, + metadata: Map[String, String] = Map.empty, + prorate: Option[Boolean] = None, + prorationDate: Option[Long] = None, + quantity: Option[Int] = None, + source: Option[String] = None, + taxPercent: Option[BigDecimal], + trialEnd: Option[Long] = None, + trialPeriodDays: Option[Int] = None): IO[ResponseError, Subscription] = + for { + c <- client + r <- execute(c.update(subscriptionId, items, applicationFeePercent, coupon, metadata, prorate, prorationDate, quantity, source, taxPercent, trialEnd, trialPeriodDays)) + } yield r + + + def cancel(customerId: String, + subscriptionId: String, + atPeriodEnd: Boolean = false): IO[ResponseError, Subscription] = + for { + c <- client + r <- execute(c.cancel(customerId, subscriptionId, atPeriodEnd)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + customer: Option[String] = None, + plan: Option[String] = None, + status: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Subscription]] = + for { + c <- client + r <- execute(c.list(created, customer, plan, status, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTokens.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTokens.scala new file mode 100644 index 0000000..d0f2feb --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTokens.scala @@ -0,0 +1,41 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe.charge.{BankAccount, Card, PII} +import com.outr.stripe.token.Token +import 
com.outr.stripe.{ResponseError, Stripe} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeTokens { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeTokens(config, logger, micrometer) + } +} + +case class LiveStripeTokens(config: Config, logger: Logger, micrometer: Micrometer) extends StripeTokens { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).tokens) + + def create(card: Option[Card] = None, + bankAccount: Option[BankAccount] = None, + pii: Option[PII] = None, + customerId: Option[String] = None): IO[ResponseError, Token] = + for { + c <- client + r <- execute(c.create(card, bankAccount, pii, customerId)) + } yield r + + + def byId(tokenId: String): IO[ResponseError, Token] = + for { + c <- client + r <- execute(c.byId(tokenId)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTransferReversals.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTransferReversals.scala new file mode 100644 index 0000000..55e6c0a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTransferReversals.scala @@ -0,0 +1,59 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.transfer.TransferReversal +import zio.{IO, ZIO, ZLayer} + +object LiveStripeTransferReversals { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeTransferReversals(config, logger, micrometer) + } +} + +case class LiveStripeTransferReversals(config: Config, logger: Logger, micrometer: Micrometer) extends StripeTransferReversals { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).transfers.reversals) + + def create(transferId: String, + amount: Option[Money] = None, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty, + refundApplicationFee: Boolean = false): IO[ResponseError, TransferReversal] = + for { + c <- client + r <- execute(c.create(transferId, amount, description, metadata, refundApplicationFee)) + } yield r + + + def byId(transferId: String, transferReversalId: String): IO[ResponseError, TransferReversal] = + for { + c <- client + r <- execute(c.byId(transferId, transferReversalId)) + } yield r + + + def update(transferId: String, + transferReversalId: String, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, TransferReversal] = + for { + c <- client + r <- execute(c.update(transferId, transferReversalId, description, metadata)) + } yield r + + + def list(transferId: String, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[TransferReversal]] = + for { + c <- client + r <- execute(c.list(transferId, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTransfers.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTransfers.scala new file mode 100644 index 0000000..352e916 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTransfers.scala @@ -0,0 +1,67 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import 
com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.transfer.Transfer +import zio.{IO, ZIO, ZLayer} + +object LiveStripeTransfers { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeTransfers(config, logger, micrometer) + } +} + +case class LiveStripeTransfers(config: Config, logger: Logger, micrometer: Micrometer) extends StripeTransfers { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).transfers) + + def create(amount: Money, + currency: String, + destination: String, + applicationFee: Option[Money] = None, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty, + sourceTransaction: Option[String] = None, + statementDescriptor: Option[String] = None, + sourceType: String = "card", + method: String = "standard"): IO[ResponseError, Transfer] = + for { + c <- client + r <- execute(c.create(amount, currency, destination, applicationFee, description, metadata, sourceTransaction, statementDescriptor, sourceType, method)) + } yield r + + + def byId(transferId: String): IO[ResponseError, Transfer] = + for { + c <- client + r <- execute(c.byId(transferId)) + } yield r + + + def update(transferId: String, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, Transfer] = + for { + c <- client + r <- execute(c.update(transferId, description, metadata)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + date: Option[TimestampFilter] = None, + destination: Option[String] = None, + recipient: Option[String] = None, + status: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Transfer]] = + for { + c <- client + r <- execute(c.list(created, date, destination, recipient, status, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeUI.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeUI.scala new file mode 100644 index 0000000..9187439 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeUI.scala @@ -0,0 +1,50 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import zio.{Task, ZIO, ZLayer} + +object LiveStripeUI { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + http <- ZIO.service[Http] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeUI(config, http, logger, micrometer) + } +} + +case class LiveStripeUI(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends StripeUI { + + def billingPortalUrl(customerId: String, returnUrl: String): Task[String] = + for { + apiKey <- config.secret("stripe-secret-key") + formBody = Map( + "customer" -> customerId, + "return_url" -> returnUrl + ) + response <- http.postFormAsJson("https://api.stripe.com/v1/billing_portal/sessions", formBody, credentials = Some((apiKey, ""))).mapError(e => new Exception(e.toString)) + url <- ZIO.fromTry(response.hcursor.downField("url").as[String].toTry) + } yield url + + + def createCheckoutSession(customerId: String, priceId: String, successUrl: String, cancelUrl: String): Task[String] = + for { + apiKey <- 
config.secret("stripe-secret-key") + formBody = Map( + "cancel_url" -> cancelUrl, + "customer" -> customerId, + "line_items[][price]" -> priceId, + "line_items[][quantity]" -> "1", + "mode" -> "subscription", + "payment_method_types[]" -> "card", + "success_url" -> successUrl + ) + response <- http.postFormAsJson("https://api.stripe.com/v1/checkout/sessions", formBody, credentials = Some((apiKey, ""))).mapError(e => new Exception(e.toString)) + id <- ZIO.fromTry(response.hcursor.downField("id").as[String].toTry) + } yield id + +} diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeAccounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeAccounts.scala new file mode 100644 index 0000000..c0be14f --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeAccounts.scala @@ -0,0 +1,49 @@ +package com.harana.modules.stripe + +import com.outr.stripe.connect._ +import com.outr.stripe.{Deleted, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeAccounts { + def create(country: Option[String] = None, + email: Option[String] = None, + custom: Boolean = false, + accountToken: Option[String] = None, + businessLogo: Option[String] = None, + businessName: Option[String] = None, + businessPrimaryColor: Option[String] = None, + businessURL: Option[String] = None, + legalEntity: Option[LegalEntity] = None, + tosAcceptance: Option[Acceptance] = None): IO[ResponseError, Account] + + def byId(accountId: String): IO[ResponseError, Account] + + def update(accountId: String, + businessLogo: Option[String] = None, + businessName: Option[String] = None, + businessPrimaryColor: Option[String] = None, + businessUrl: Option[String] = None, + debitNegativeBalances: Option[Boolean] = None, + declineChargeOn: Option[DeclineChargeOn] = None, + defaultCurrency: Option[String] = None, + email: Option[String] = None, + externalAccount: Option[String] = None, + legalEntity: Option[LegalEntity] = None, + metadata: Map[String, String] = Map.empty, + productDescription: Option[String] = None, + statementDescriptor: Option[String] = None, + supportEmail: Option[String] = None, + supportPhone: Option[String] = None, + supportUrl: Option[String] = None, + tosAcceptance: Option[Acceptance] = None, + transferSchedule: Option[TransferSchedule] = None, + transferStatementDescriptor: Option[String] = None): IO[ResponseError, Account] + + def delete(accountId: String): IO[ResponseError, Deleted] + + def reject(accountId: String, reason: String): IO[ResponseError, Account] + + def list(config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Account]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeApplicationFeeRefunds.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeApplicationFeeRefunds.scala new file mode 100644 index 0000000..c67d709 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeApplicationFeeRefunds.scala @@ -0,0 +1,19 @@ +package com.harana.modules.stripe + +import com.outr.stripe.connect.FeeRefund +import com.outr.stripe.{Money, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeApplicationFeeRefunds { + def create(feeId: String, + amount: Option[Money] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, FeeRefund] + + def byId(feeId: String, refundId: String): IO[ResponseError, FeeRefund] + + def update(feeId: String, refundId: String, metadata: Map[String, 
String] = Map.empty): IO[ResponseError, FeeRefund] + + def list(feeId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[FeeRefund]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeApplicationFees.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeApplicationFees.scala new file mode 100644 index 0000000..8cc04eb --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeApplicationFees.scala @@ -0,0 +1,15 @@ +package com.harana.modules.stripe + +import com.outr.stripe.connect.ApplicationFee +import com.outr.stripe.{QueryConfig, ResponseError, StripeList, TimestampFilter} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeApplicationFees { + def byId(feeId: String): IO[ResponseError, ApplicationFee] + + def list(charge: Option[String] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[ApplicationFee]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeBalance.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeBalance.scala new file mode 100644 index 0000000..ee4f985 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeBalance.scala @@ -0,0 +1,21 @@ +package com.harana.modules.stripe + +import com.outr.stripe.balance.{Balance, BalanceTransaction} +import com.outr.stripe.{QueryConfig, ResponseError, StripeList, TimestampFilter} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeBalance { + def get: IO[ResponseError, Balance] + + def byId(id: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, BalanceTransaction] + + def list(availableOn: Option[TimestampFilter] = None, + created: Option[TimestampFilter] = None, + currency: Option[String] = None, + source: Option[String] = None, + transfer: Option[String] = None, + `type`: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[BalanceTransaction]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeCharges.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeCharges.scala new file mode 100644 index 0000000..896780a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeCharges.scala @@ -0,0 +1,42 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.charge.{Charge, FraudDetails, Shipping} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeCharges { + def create(amount: Money, + currency: String, + applicationFee: Option[Money] = None, + capture: Boolean = true, + description: Option[String] = None, + destination: Option[String] = None, + metadata: Map[String, String] = Map.empty, + receiptEmail: Option[String] = None, + shipping: Option[Shipping] = None, + customer: Option[String] = None, + source: Option[String] = None, + statementDescriptor: Option[String] = None): IO[ResponseError, Charge] + + def byId(chargeId: String): IO[ResponseError, Charge] + + def update(chargeId: String, + description: Option[String] = None, + fraudDetails: Option[FraudDetails] = None, + metadata: Map[String, String] = Map.empty, + receiptEmail: Option[String] = None, + shipping: Option[Shipping] = None): IO[ResponseError, Charge] + + def capture(chargeId: String, + amount: Option[Money] = None, + applicationFee: Option[Money] = None, + receiptEmail: Option[String] = None, + statementDescriptor: 
Option[String] = None): IO[ResponseError, Charge] + + def list(created: Option[TimestampFilter] = None, + customer: Option[String] = None, + source: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Charge]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeCountrySpecs.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeCountrySpecs.scala new file mode 100644 index 0000000..134e981 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeCountrySpecs.scala @@ -0,0 +1,13 @@ +package com.harana.modules.stripe + +import com.outr.stripe.connect.CountrySpec +import com.outr.stripe.{QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeCountrySpecs { + def list(config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[CountrySpec]] + + def byId(countryCode: String): IO[ResponseError, CountrySpec] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeCoupons.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeCoupons.scala new file mode 100644 index 0000000..ea33f17 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeCoupons.scala @@ -0,0 +1,28 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.subscription.Coupon +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeCoupons { + def create(couponId: String, + duration: String, + amountOff: Option[Money] = None, + currency: Option[String] = None, + durationInMonths: Option[Int] = None, + maxRedemptions: Option[Int] = None, + metadata: Map[String, String] = Map.empty, + percentOff: Option[Int] = None, + redeemBy: Option[Long] = None): IO[ResponseError, Coupon] + + def byId(couponId: String): IO[ResponseError, Coupon] + + def update(couponId: String, metadata: Map[String, String]): IO[ResponseError, Coupon] + + def delete(couponId: String): IO[ResponseError, Deleted] + + def list(created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Coupon]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomerBankAccounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomerBankAccounts.scala new file mode 100644 index 0000000..61781ca --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomerBankAccounts.scala @@ -0,0 +1,32 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.charge.BankAccount +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeCustomerBankAccounts { + def create(customerId: String, + source: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] + + def byId(customerId: String, bankAccountId: String): IO[ResponseError, BankAccount] + + def update(customerId: String, + bankAccountId: String, + accountHolderName: Option[String] = None, + accountHolderType: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] + + def verify(customerId: String, + bankAccountId: String, + amount1: Option[Money] = None, + amount2: Option[Money] = None, + verificationMethod: Option[String] = None): IO[ResponseError, BankAccount] + + def delete(customerId: String, bankAccountId: String): IO[ResponseError, Deleted] + + def 
list(customerId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[BankAccount]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomerCreditCards.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomerCreditCards.scala new file mode 100644 index 0000000..cd7c3d1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomerCreditCards.scala @@ -0,0 +1,35 @@ +package com.harana.modules.stripe + +import com.outr.stripe.charge.Card +import com.outr.stripe.{Deleted, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeCustomerCreditCards { + def create(customerId: String, + source: Option[String] = None, + externalAccount: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, Card] + + def byId(customerId: String, cardId: String): IO[ResponseError, Card] + + def update(customerId: String, + cardId: String, + addressCity: Option[String] = None, + addressCountry: Option[String] = None, + addressLine1: Option[String] = None, + addressLine2: Option[String] = None, + addressState: Option[String] = None, + addressZip: Option[String] = None, + defaultForCurrency: Option[String] = None, + expMonth: Option[Int] = None, + expYear: Option[Int] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None): IO[ResponseError, Card] + + def delete(customerId: String, cardId: String): IO[ResponseError, Deleted] + + def list(customerId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Card]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomers.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomers.scala new file mode 100644 index 0000000..01c4fc7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomers.scala @@ -0,0 +1,51 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.charge.{Address, Card, Shipping} +import com.outr.stripe.customer.Customer +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeCustomers { + def create(address: Option[Address] = None, + balance: Option[Money] = None, + coupon: Option[String] = None, + description: Option[String] = None, + email: Option[String] = None, + invoicePrefix: Option[String] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + nextInvoiceSequence: Option[Int] = None, + paymentMethodId: Option[String] = None, + phone: Option[String] = None, + promotionCode: Option[String] = None, + shipping: Option[Shipping] = None, + source: Option[Card] = None, + taxExempt: Option[String] = None): IO[ResponseError, Customer] + + def byId(customerId: String): IO[ResponseError, Customer] + + def update(customerId: String, + address: Option[Address] = None, + balance: Option[Money] = None, + coupon: Option[String] = None, + defaultSource: Option[String] = None, + description: Option[String] = None, + email: Option[String] = None, + invoicePrefix: Option[String] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + nextInvoiceSequence: Option[Int] = None, + phone: Option[String] = None, + promotionCode: Option[String] = None, + shipping: Option[Shipping] = None, + source: Option[Card] = None, + taxExempt: Option[String] = None): IO[ResponseError, Customer] + + def 
delete(customerId: String): IO[ResponseError, Deleted] + + def list(created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default, + email: Option[String] = None): IO[ResponseError, StripeList[Customer]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeDiscounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeDiscounts.scala new file mode 100644 index 0000000..29911fc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeDiscounts.scala @@ -0,0 +1,12 @@ +package com.harana.modules.stripe + +import com.outr.stripe.{Deleted, ResponseError} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeDiscounts { + def deleteCustomerDiscount(customerId: String): IO[ResponseError, Deleted] + + def deleteSubscriptionDiscount(subscriptionId: String): IO[ResponseError, Deleted] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeDisputes.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeDisputes.scala new file mode 100644 index 0000000..62048ab --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeDisputes.scala @@ -0,0 +1,20 @@ +package com.harana.modules.stripe + +import com.outr.stripe.dispute.{Dispute, DisputeEvidence} +import com.outr.stripe.{QueryConfig, ResponseError, StripeList, TimestampFilter} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeDisputes { + def byId(disputeId: String): IO[ResponseError, Dispute] + + def update(disputeId: String, + evidence: Option[DisputeEvidence] = None, + metadata: Map[String, String]): IO[ResponseError, Dispute] + + def close(disputeId: String): IO[ResponseError, Dispute] + + def list(created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Dispute]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeEvents.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeEvents.scala new file mode 100644 index 0000000..f9b6665 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeEvents.scala @@ -0,0 +1,16 @@ +package com.harana.modules.stripe + +import com.outr.stripe.event.Event +import com.outr.stripe.{QueryConfig, ResponseError, StripeList, TimestampFilter} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeEvents { + def byId(eventId: String): IO[ResponseError, Event] + + def list(created: Option[TimestampFilter] = None, + `type`: Option[String] = None, + types: List[String] = Nil, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Event]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeExternalBankAccounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeExternalBankAccounts.scala new file mode 100644 index 0000000..845736f --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeExternalBankAccounts.scala @@ -0,0 +1,26 @@ +package com.harana.modules.stripe + +import com.outr.stripe.charge.BankAccount +import com.outr.stripe.{Deleted, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeExternalBankAccounts { + def create(accountId: String, + source: Option[String] = None, + externalAccount: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] + + def byId(accountId: String, 
bankAccountId: String): IO[ResponseError, BankAccount] + + def update(accountId: String, + bankAccountId: String, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] + + def delete(accountId: String, bankAccountId: String): IO[ResponseError, Deleted] + + def list(accountId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[BankAccount]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeExternalCreditCards.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeExternalCreditCards.scala new file mode 100644 index 0000000..404f63b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeExternalCreditCards.scala @@ -0,0 +1,35 @@ +package com.harana.modules.stripe + +import com.outr.stripe.charge.Card +import com.outr.stripe.{Deleted, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeExternalCreditCards { + def create(accountId: String, + source: Option[String] = None, + externalAccount: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, Card] + + def byId(accountId: String, cardId: String): IO[ResponseError, Card] + + def update(accountId: String, + cardId: String, + addressCity: Option[String] = None, + addressCountry: Option[String] = None, + addressLine1: Option[String] = None, + addressLine2: Option[String] = None, + addressState: Option[String] = None, + addressZip: Option[String] = None, + defaultForCurrency: Option[String] = None, + expMonth: Option[Int] = None, + expYear: Option[Int] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None): IO[ResponseError, Card] + + def delete(accountId: String, cardId: String): IO[ResponseError, Deleted] + + def list(accountId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Card]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeInvoiceItems.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeInvoiceItems.scala new file mode 100644 index 0000000..5bad7b2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeInvoiceItems.scala @@ -0,0 +1,32 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.subscription.InvoiceItem +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeInvoiceItems { + def create(amount: Money, + currency: String, + customerId: String, + description: Option[String] = None, + discountable: Option[Boolean] = None, + invoice: Option[String] = None, + metadata: Map[String, String] = Map.empty, + subscription: Option[String] = None): IO[ResponseError, InvoiceItem] + + def byId(invoiceItemId: String): IO[ResponseError, InvoiceItem] + + def update(invoiceItemId: String, + amount: Option[Money] = None, + description: Option[String] = None, + discountable: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, InvoiceItem] + + def delete(invoiceItemId: String): IO[ResponseError, Deleted] + + def list(created: Option[TimestampFilter] = None, + customer: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[InvoiceItem]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeInvoices.scala 
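
The `@accessible` traits above are the public face of each module: application code depends on the trait and supplies the matching Live layer when the program is wired together. A rough sketch of what a caller might look like; `Config.live`, `Logger.live` and `Micrometer.live` are placeholder names for whatever layers this repository actually provides for those core services, and `cus_123` is an illustrative id:

```
import zio.{ZIO, ZIOAppDefault}
import com.harana.modules.core.config.Config
import com.harana.modules.core.logger.Logger
import com.harana.modules.core.micrometer.Micrometer
import com.harana.modules.stripe.{LiveStripeCustomers, StripeCustomers}

object LookupCustomerExample extends ZIOAppDefault {
  // Fetch a customer through the StripeCustomers service defined above.
  private val program =
    ZIO.serviceWithZIO[StripeCustomers](_.byId("cus_123"))
      .flatMap(customer => ZIO.logInfo(s"Found customer: $customer"))

  // Config.live, Logger.live and Micrometer.live are hypothetical placeholders
  // for the core layers this repo provides; adjust to the real names.
  def run =
    program.provide(
      LiveStripeCustomers.layer,
      Config.live,
      Logger.live,
      Micrometer.live
    )
}
```
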
b/jvm/src/main/scala/com/harana/modules/stripe/StripeInvoices.scala new file mode 100644 index 0000000..c722533 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeInvoices.scala @@ -0,0 +1,54 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.subscription.{Invoice, InvoiceLine} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeInvoices { + def create(customerId: String, + applicationFee: Option[Money] = None, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty, + statementDescriptor: Option[String] = None, + subscription: Option[String] = None, + taxPercent: Option[BigDecimal] = None): IO[ResponseError, Invoice] + + def byId(invoiceId: String): IO[ResponseError, Invoice] + + def linesById(invoiceId: String, + coupon: Option[String] = None, + customer: Option[String] = None, + subscription: Option[String] = None, + subscriptionPlan: Option[String] = None, + subscriptionProrate: Option[String] = None, + subscriptionProrationDate: Option[Long] = None, + subscriptionQuantity: Option[Int] = None, + subscriptionTrialEnd: Option[Long] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[InvoiceLine]] + + def upcoming(customerId: String, + coupon: Option[String] = None, + subscription: Option[String] = None, + subscriptionPlan: Option[String] = None, + subscriptionProrate: Option[String] = None, + subscriptionProrationDate: Option[Long] = None, + subscriptionQuantity: Option[Int] = None, + subscriptionTrialEnd: Option[Long] = None): IO[ResponseError, Invoice] + + def update(invoiceId: String, + applicationFee: Option[Money] = None, + closed: Option[Boolean] = None, + description: Option[String] = None, + forgiven: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty, + statementDescriptor: Option[String] = None, + taxPercent: Option[BigDecimal] = None): IO[ResponseError, Invoice] + + def pay(invoiceId: String): IO[ResponseError, Invoice] + + def list(customerId: Option[String] = None, + date: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Invoice]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripePlans.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripePlans.scala new file mode 100644 index 0000000..6c8f268 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripePlans.scala @@ -0,0 +1,34 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.subscription.Plan +import zio.IO +import zio.macros.accessible + +@accessible +trait StripePlans { + def create(planId: String, + amount: Money, + currency: String, + interval: String, + productId: String, + intervalCount: Int = 1, + metadata: Map[String, String] = Map.empty, + nickname: Option[String], + trialPeriodDays: Option[Int] = None): IO[ResponseError, Plan] + + def byId(planId: String): IO[ResponseError, Plan] + + def update(planId: String, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + productId: Option[String] = None, + statementDescriptor: Option[String] = None, + trialPeriodDays: Option[Int] = None): IO[ResponseError, Plan] + + def delete(planId: String): IO[ResponseError, Deleted] + + def list(active: Option[Boolean] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Plan]] +} \ No newline at end of file diff --git 
a/jvm/src/main/scala/com/harana/modules/stripe/StripePrices.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripePrices.scala new file mode 100644 index 0000000..0b6fc87 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripePrices.scala @@ -0,0 +1,47 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.price.{Price, Recurring, Tier, TransformQuantity} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripePrices { + def create(currency: String, + active: Option[Boolean] = None, + billingScheme: Option[String] = None, + lookupKey: Option[String] = None, + metadata: Map[String, String] = Map.empty, + nickname: Option[String] = None, + recurring: Option[Recurring] = None, + tiers: List[Tier] = List(), + tiersMode: Option[String] = None, + transferLookupKey: Option[Boolean] = None, + transformQuantity: Option[TransformQuantity] = None, + unitAmount: Option[Int] = None, + unitAmountDecimal: Option[BigDecimal] = None): IO[ResponseError, Price] + + + def byId(priceId: String): IO[ResponseError, Price] + + + def update(priceId: String, + active: Option[Boolean] = None, + lookupKey: Option[String] = None, + metadata: Map[String, String] = Map.empty, + nickname: Option[String] = None, + transferLookupKey: Option[Boolean] = None): IO[ResponseError, Price] + + + def delete(planId: String): IO[ResponseError, Deleted] + + + def list(active: Option[Boolean] = None, + currency: Option[String] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default, + endingBefore: Option[String] = None, + limit: Option[Int] = None, + productId: Option[String] = None, + `type`: Option[String] = None): IO[ResponseError, StripeList[Price]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeProducts.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeProducts.scala new file mode 100644 index 0000000..01517dd --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeProducts.scala @@ -0,0 +1,55 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.product.{PackageDimensions, Product => StripeProduct} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeProducts { + def create(name: String, + active: Option[Boolean] = None, + attributes: List[String] = List.empty, + caption: Option[String] = None, + deactivateOn: List[String] = List.empty, + description: Option[String] = None, + images: List[String] = List.empty, + liveMode: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty, + packageDimensions: Option[PackageDimensions] = None, + productId: Option[String] = None, + shippable: Option[Boolean] = None, + statementDescriptor: Option[String] = None, + `type`: Option[String] = None, + unitLabel: Option[String] = None, + url: Option[String] = None): IO[ResponseError, StripeProduct] + + def byId(productId: String): IO[ResponseError, StripeProduct] + + def update(productId: String, + active: Option[Boolean] = None, + attributes: List[String] = List.empty, + caption: Option[String] = None, + deactivateOn: List[String] = List.empty, + description: Option[String] = None, + images: List[String] = List.empty, + liveMode: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + packageDimensions: Option[PackageDimensions] = None, + shippable: Option[Boolean] = None, + statementDescriptor: Option[String] = None, + `type`: Option[String] = None, + 
unitLabel: Option[String] = None, + url: Option[String] = None): IO[ResponseError, StripeProduct] + + def delete(productId: String): IO[ResponseError, Deleted] + + def list(active: Option[Boolean] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default, + ids: List[String] = Nil, + shippable: Option[Boolean] = None, + `type`: Option[String] = None, + url: Option[String] = None): IO[ResponseError, StripeList[StripeProduct]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeRefunds.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeRefunds.scala new file mode 100644 index 0000000..55bb324 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeRefunds.scala @@ -0,0 +1,23 @@ +package com.harana.modules.stripe + +import com.outr.stripe.refund.Refund +import com.outr.stripe.{Money, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeRefunds { + def create(chargeId: String, + amount: Option[Money] = None, + metadata: Map[String, String] = Map.empty, + reason: Option[String] = None, + refundApplicationFee: Boolean = false, + reverseTransfer: Boolean = false): IO[ResponseError, Refund] + + def byId(refundId: String): IO[ResponseError, Refund] + + def update(refundId: String, metadata: Map[String, String] = Map.empty): IO[ResponseError, Refund] + + def list(chargeId: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Refund]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeSubscriptionItems.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeSubscriptionItems.scala new file mode 100644 index 0000000..9d807e2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeSubscriptionItems.scala @@ -0,0 +1,37 @@ +package com.harana.modules.stripe + +import com.outr.stripe.subscription.SubscriptionItem +import com.outr.stripe.{Deleted, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeSubscriptionItems { + def create(subscriptionId: String, + billingThresholds: Map[String, String] = Map(), + metadata: Map[String, String] = Map(), + paymentBehavior: Option[String] = None, + priceId: Option[String] = None, + prorationBehavior: Option[String] = None, + prorationDate: Option[Long] = None, + quantity: Option[Int] = None, + taxRates: List[String] = List()): IO[ResponseError, SubscriptionItem] + + def byId(subscriptionItemId: String): IO[ResponseError, SubscriptionItem] + + def update(subscriptionItemId: String, + billingThresholds: Map[String, String] = Map(), + metadata: Map[String, String] = Map(), + offSession: Option[Boolean] = None, + paymentBehavior: Option[String] = None, + priceId: Option[String] = None, + prorationBehavior: Option[String] = None, + prorationDate: Option[Long] = None, + quantity: Option[Int] = None, + taxRates: List[String] = List()): IO[ResponseError, SubscriptionItem] + + def delete(subscriptionItemId: String): IO[ResponseError, Deleted] + + def list(subscription: String, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[SubscriptionItem]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeSubscriptions.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeSubscriptions.scala new file mode 100644 index 0000000..8845267 --- /dev/null +++ 
b/jvm/src/main/scala/com/harana/modules/stripe/StripeSubscriptions.scala @@ -0,0 +1,46 @@ +package com.harana.modules.stripe + +import com.outr.stripe.subscription.{CreateSubscriptionItem, Subscription} +import com.outr.stripe.{QueryConfig, ResponseError, StripeList, TimestampFilter} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeSubscriptions { + def create(customerId: String, + items: List[CreateSubscriptionItem], + applicationFeePercent: Option[BigDecimal] = None, + coupon: Option[String] = None, + metadata: Map[String, String] = Map.empty, + prorate: Option[Boolean] = None, + quantity: Option[Int] = None, + source: Option[String] = None, + taxPercent: Option[BigDecimal] = None, + trialEnd: Option[Long] = None, + trialPeriodDays: Option[Int] = None): IO[ResponseError, Subscription] + + def byId(subscriptionId: String): IO[ResponseError, Subscription] + + def update(subscriptionId: String, + items: List[CreateSubscriptionItem] = List(), + applicationFeePercent: Option[BigDecimal] = None, + coupon: Option[String] = None, + metadata: Map[String, String] = Map.empty, + prorate: Option[Boolean] = None, + prorationDate: Option[Long] = None, + quantity: Option[Int] = None, + source: Option[String] = None, + taxPercent: Option[BigDecimal], + trialEnd: Option[Long] = None, + trialPeriodDays: Option[Int] = None): IO[ResponseError, Subscription] + + def cancel(customerId: String, + subscriptionId: String, + atPeriodEnd: Boolean = false): IO[ResponseError, Subscription] + + def list(created: Option[TimestampFilter] = None, + customer: Option[String] = None, + plan: Option[String] = None, + status: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Subscription]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeTokens.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeTokens.scala new file mode 100644 index 0000000..98e1ace --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeTokens.scala @@ -0,0 +1,17 @@ +package com.harana.modules.stripe + +import com.outr.stripe.ResponseError +import com.outr.stripe.charge.{BankAccount, Card, PII} +import com.outr.stripe.token.Token +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeTokens { + def create(card: Option[Card] = None, + bankAccount: Option[BankAccount] = None, + pii: Option[PII] = None, + customerId: Option[String] = None): IO[ResponseError, Token] + + def byId(tokenId: String): IO[ResponseError, Token] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeTransferReversals.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeTransferReversals.scala new file mode 100644 index 0000000..6e650e2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeTransferReversals.scala @@ -0,0 +1,25 @@ +package com.harana.modules.stripe + +import com.outr.stripe.transfer.TransferReversal +import com.outr.stripe.{Money, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeTransferReversals { + def create(transferId: String, + amount: Option[Money] = None, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty, + refundApplicationFee: Boolean = false): IO[ResponseError, TransferReversal] + + def byId(transferId: String, transferReversalId: String): IO[ResponseError, TransferReversal] + + def update(transferId: String, + transferReversalId: String, + 
description: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, TransferReversal] + + def list(transferId: String, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[TransferReversal]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeTransfers.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeTransfers.scala new file mode 100644 index 0000000..ac1b26a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeTransfers.scala @@ -0,0 +1,33 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.transfer.Transfer +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeTransfers { + def create(amount: Money, + currency: String, + destination: String, + applicationFee: Option[Money] = None, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty, + sourceTransaction: Option[String] = None, + statementDescriptor: Option[String] = None, + sourceType: String = "card", + method: String = "standard"): IO[ResponseError, Transfer] + + def byId(transferId: String): IO[ResponseError, Transfer] + + def update(transferId: String, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, Transfer] + + def list(created: Option[TimestampFilter] = None, + date: Option[TimestampFilter] = None, + destination: Option[String] = None, + recipient: Option[String] = None, + status: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Transfer]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeUI.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeUI.scala new file mode 100644 index 0000000..a61c1e2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeUI.scala @@ -0,0 +1,11 @@ +package com.harana.modules.stripe + +import zio.Task +import zio.macros.accessible + +@accessible +trait StripeUI { + def billingPortalUrl(customerId: String, returnUrl: String): Task[String] + + def createCheckoutSession(customerId: String, priceId: String, successUrl: String, cancelUrl: String): Task[String] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/package.scala b/jvm/src/main/scala/com/harana/modules/stripe/package.scala new file mode 100644 index 0000000..d4b7cf1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/package.scala @@ -0,0 +1,15 @@ +package com.harana.modules + +import zio.{IO, ZIO} + +import scala.concurrent.Future + +package object stripe { + + def execute[E, A](output: Future[Either[E, A]]): IO[E, A] = + ZIO.succeed(output).flatMap { o => + ZIO.fromFuture { _ => + o + }.orDie.absolve + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/thumbnailator/LiveThumbnailator.scala b/jvm/src/main/scala/com/harana/modules/thumbnailator/LiveThumbnailator.scala new file mode 100644 index 0000000..cbc18de --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/thumbnailator/LiveThumbnailator.scala @@ -0,0 +1,44 @@ +package com.harana.modules.thumbnailator + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.thumbnailator.streams.ByteBufferInputStream +import com.harana.modules.vertx.models.streams.VertxBufferOutputStream +import io.vertx.core.buffer.Buffer +import 
net.coobird.thumbnailator.Thumbnails +import zio.{Task, ZIO, ZLayer} + +import java.nio.ByteBuffer +import scala.jdk.CollectionConverters._ + +object LiveThumbnailator { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveThumbnailator(config, logger, micrometer) + } +} + +case class LiveThumbnailator(config: Config, logger: Logger, micrometer: Micrometer) extends Thumbnailator { + + def thumbnailAsVertxBuffer(byteBuffer: ByteBuffer, + width: Option[Int] = None, + height: Option[Int] = None, + keepAspectRatio: Boolean = true, + outputFormat: String = "JPEG"): Task[Buffer] = + ZIO.attempt { + val builder = Thumbnails.fromInputStreams(Seq(new ByteBufferInputStream(byteBuffer)).asJava) + val os = new VertxBufferOutputStream + + if (width.nonEmpty) builder.width(width.get) + if (height.nonEmpty) builder.height(height.get) + builder.keepAspectRatio(keepAspectRatio) + builder.outputFormat(outputFormat) + builder.toOutputStream(os) + os.buffer + } + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/thumbnailator/Thumbnailator.scala b/jvm/src/main/scala/com/harana/modules/thumbnailator/Thumbnailator.scala new file mode 100644 index 0000000..3282daf --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/thumbnailator/Thumbnailator.scala @@ -0,0 +1,17 @@ +package com.harana.modules.thumbnailator + +import io.vertx.core.buffer.Buffer +import zio.Task +import zio.macros.accessible + +import java.nio.ByteBuffer + +@accessible +trait Thumbnailator { + + def thumbnailAsVertxBuffer(byteBuffer: ByteBuffer, + width: Option[Int] = None, + height: Option[Int] = None, + keepAspectRatio: Boolean = true, + outputFormat: String = "JPEG"): Task[Buffer] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/thumbnailator/streams/ByteBufferInputStream.scala b/jvm/src/main/scala/com/harana/modules/thumbnailator/streams/ByteBufferInputStream.scala new file mode 100644 index 0000000..a6482c8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/thumbnailator/streams/ByteBufferInputStream.scala @@ -0,0 +1,49 @@ +package com.harana.modules.thumbnailator.streams + +import java.io.InputStream +import java.nio.ByteBuffer + +/** + * Wraps a ByteBuffer so it can be used with interfaces that require an InputStream. The + * buffer should not be modified outside of the reader until reading is complete. 
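+ *
+ * A minimal usage sketch (illustrative only; the sample bytes and the use of scala.io.Source are assumptions, not part of this module):
+ * {{{
+ * val buffer = java.nio.ByteBuffer.wrap("hello".getBytes("UTF-8"))
+ * val text = scala.io.Source.fromInputStream(new ByteBufferInputStream(buffer)).mkString // "hello"
+ * }}}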
+ */ +class ByteBufferInputStream(buffer: ByteBuffer) extends InputStream { + + override def read(): Int = { + if (buffer.hasRemaining) buffer.get() & 0xFF else -1 + } + + override def read(buf: Array[Byte], offset: Int, length: Int): Int = { + if (buffer.hasRemaining) { + val readLength = math.min(buffer.remaining(), length) + buffer.get(buf, offset, readLength) + readLength + } else { + -1 + } + } + + override def available(): Int = { + buffer.remaining() + } + + override def skip(n: Long): Long = { + val skipAmount = math.min(buffer.remaining(), n).toInt + buffer.position(buffer.position() + skipAmount) + skipAmount + } + + override def markSupported(): Boolean = true + + override def mark(readlimit: Int): Unit = { + buffer.mark() + } + + override def reset(): Unit = { + buffer.reset() + } + + override def close(): Unit = { + buffer.flip() + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/Handlers.scala b/jvm/src/main/scala/com/harana/modules/vertx/Handlers.scala new file mode 100644 index 0000000..9650f2d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/Handlers.scala @@ -0,0 +1,80 @@ +package com.harana.modules.vertx + +import io.vertx.core.buffer.Buffer +import io.vertx.core.json.JsonObject +import io.vertx.core.{AsyncResult, Handler, Vertx => VX} +import io.vertx.ext.bridge.PermittedOptions +import io.vertx.ext.web.RoutingContext +import io.vertx.ext.web.handler.sockjs.{SockJSBridgeOptions, SockJSHandler} +import io.vertx.ext.web.templ.handlebars.HandlebarsTemplateEngine +import org.jose4j.jwk.{JsonWebKey, JsonWebKeySet} +import org.pac4j.core.config.Config +import org.pac4j.core.context.session.SessionStore +import org.pac4j.core.exception.http.HttpAction +import org.pac4j.core.util.Pac4jConstants +import org.pac4j.http.client.indirect.FormClient +import org.pac4j.vertx.VertxWebContext +import org.pac4j.vertx.handler.impl.{LogoutHandler, LogoutHandlerOptions} + +import javax.ws.rs.core.{HttpHeaders, MediaType} + +object Handlers { + + def sock(vx: VX, inboundPermitted: List[String], outboundPermitted: List[String]) = { + val bridgeOptions = new SockJSBridgeOptions() + inboundPermitted.foreach(regex => bridgeOptions.addInboundPermitted(new PermittedOptions().setAddressRegex(regex))) + outboundPermitted.foreach(regex => bridgeOptions.addOutboundPermitted(new PermittedOptions().setAddressRegex(regex))) + SockJSHandler.create(vx).bridge(bridgeOptions) + } + + + def jwks(jwks: JsonWebKeySet): Handler[RoutingContext] = { + rc: RoutingContext => { + rc.response + .putHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON) + .end(jwks.toJson(JsonWebKey.OutputControlLevel.PUBLIC_ONLY)) + } + } + + + def forceLogin(config: Config, sessionStore: SessionStore): Handler[RoutingContext] = { + rc: RoutingContext => { + val context = new VertxWebContext(rc, sessionStore) + try { + val client = config.getClients.findClient(context.getRequestParameter(Pac4jConstants.DEFAULT_CLIENT_NAME_PARAMETER).get) + val action = client.get.getRedirectionAction(context, sessionStore) + val adapter = config.getHttpActionAdapter + adapter.adapt(action.get, context) + } catch { + case h: HttpAction => rc.fail(h) + } + } + } + + + def loginForm(vx: VX, config: Config, templateFileName: String, parameters: Map[String, AnyRef]): Handler[RoutingContext] = { + rc: RoutingContext => { + val url = config.getClients.findClient("FormClient").get.asInstanceOf[FormClient].getCallbackUrl + template(vx, rc, templateFileName, parameters ++ Map("url" -> url)) + } + } + + + def 
centralLogout(vx: VX, config: Config, sessionStore: SessionStore, postLogoutUrl: String): Handler[RoutingContext] = { + val options = new LogoutHandlerOptions().setCentralLogout(true).setLocalLogout(false).setDefaultUrl(postLogoutUrl) + new LogoutHandler(vx, sessionStore, options, config) + } + + + def template(vx: VX, rc: RoutingContext, templateFileName: String, parameters: Map[String, AnyRef]): Unit = { + val engine = HandlebarsTemplateEngine.create(vx) + val json = new JsonObject() + parameters.foreach { p => json.put(p._1, p._2) } + engine.render(json, templateFileName, new Handler[AsyncResult[Buffer]] { + override def handle(result: AsyncResult[Buffer]): Unit = { + if (result.succeeded()) rc.response.end(result.result()) else rc.fail(result.cause()) + } + }) + } + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/LiveVertx.scala b/jvm/src/main/scala/com/harana/modules/vertx/LiveVertx.scala new file mode 100644 index 0000000..71d9086 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/LiveVertx.scala @@ -0,0 +1,515 @@ +package com.harana.modules.vertx + +import com.harana.modules.core.app.App.runEffect +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.vertx.gc.GCHealthCheck +import com.harana.modules.vertx.models._ +import com.harana.modules.vertx.proxy.{WSURI, WebProxyClient, WebProxyClientOptions} +import io.vertx.core.eventbus._ +import io.vertx.core.file.FileSystemOptions +import io.vertx.core.http.{HttpServer, HttpServerOptions, WebSocket} +import io.vertx.core.json.JsonObject +import io.vertx.core.net.{JksOptions, NetServer, NetServerOptions} +import io.vertx.core.shareddata.{AsyncMap, Counter, Lock} +import io.vertx.core.{AsyncResult, Context, Handler, VertxOptions, Vertx => VX} +import io.vertx.ext.bridge.{BridgeOptions, PermittedOptions} +import io.vertx.ext.eventbus.bridge.tcp.TcpEventBusBridge +import io.vertx.ext.web.client.{WebClient, WebClientOptions} +import io.vertx.ext.web.handler.{BodyHandler, CorsHandler, SessionHandler} +import io.vertx.ext.web.sstore.cookie.CookieSessionStore +import io.vertx.ext.web.templ.handlebars.HandlebarsTemplateEngine +import io.vertx.ext.web.{Router, RoutingContext} +import io.vertx.micrometer.{MicrometerMetricsOptions, PrometheusScrapingHandler, VertxPrometheusOptions} +import io.vertx.servicediscovery.{Record, ServiceDiscovery} +import io.vertx.spi.cluster.zookeeper.ZookeeperClusterManager +import org.jose4j.jwk.JsonWebKeySet +import org.pac4j.core.client.Clients +import org.pac4j.core.config.{Config => Pac4jConfig} +import org.pac4j.core.profile.UserProfile +import org.pac4j.vertx.context.session.VertxSessionStore +import org.pac4j.vertx.handler.impl._ +import org.pac4j.vertx.http.VertxHttpActionAdapter +import org.pac4j.vertx.{VertxProfileManager, VertxWebContext} +import zio.{Runtime, Task, UIO, Unsafe, ZIO, ZLayer} + +import java.io.File +import java.net.URI +import java.nio.file.Files +import java.util.Base64 +import java.util.concurrent.atomic.AtomicReference +import scala.collection.concurrent.{TrieMap, Map => ConcurrentMap} +import scala.compat.java8.FunctionConverters.asJavaFunction +import scala.compat.java8.OptionConverters._ +import scala.jdk.CollectionConverters._ + +object LiveVertx { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveVertx(config, logger, 
micrometer) + } +} + +case class LiveVertx(config: Config, logger: Logger, micrometer: Micrometer) extends Vertx { + + System.setProperty("org.jboss.logging.provider", "log4j2") + System.setProperty("vertx.logger-delegate-factory-class-name", "io.vertx.core.logging.Log4j2LogDelegateFactory") + + private val vertxRef = new AtomicReference[VX](null) + private val serviceDiscoveryRef = new AtomicReference[Option[ServiceDiscovery]](None) + private val serviceDiscoveryListeners: ConcurrentMap[String, Record => Unit] = TrieMap.empty + + private def vertx(clustered: Boolean) = + for { + vertxBlockedThreads <- config.long("vertx.blockedThreadsCheckInterval", 10000L) + + zookeeperHost <- config.optSecret("zookeeper-host") + zookeeperPrefix <- config.optString("zookeeper.prefix") + + listenHost <- config.string("http.listenHost") + publicHost <- config.string("http.publicHost", sys.env.getOrElse("POD_IP", listenHost)) + eventBusPort <- config.int("http.eventBusPort", 10000) + + eventBusOptions = new EventBusOptions() + .setClusterPublicHost(publicHost) + .setClusterPublicPort(eventBusPort) + .setLogActivity(true) + + fileSystemOptions = new FileSystemOptions().setFileCachingEnabled(false) + + registry <- micrometer.registry + + clusterManager <- if (zookeeperHost.nonEmpty) ZIO.some { + val zkConfig = new JsonObject() + zkConfig.put("zookeeperHosts", zookeeperHost) + zkConfig.put("rootPath", zookeeperPrefix.map(p => s"$p.vertx").getOrElse("vertx")) + new ZookeeperClusterManager(zkConfig) + } else ZIO.none + + vertxOptions = new VertxOptions() + .setBlockedThreadCheckInterval(vertxBlockedThreads) + .setEventBusOptions(eventBusOptions) + .setFileSystemOptions(fileSystemOptions) + .setMetricsOptions(new MicrometerMetricsOptions() + .setMicrometerRegistry(registry) + .setPrometheusOptions(new VertxPrometheusOptions().setEnabled(true)).setEnabled(true) + ) + + vx <- ZIO.async { (cb: Task[VX] => Unit) => + if (clustered) + VX.clusteredVertx( + vertxOptions.setClusterManager(clusterManager.get), + (result: AsyncResult[VX]) => if (result.succeeded()) cb(ZIO.succeed(result.result)) else cb(ZIO.fail(result.cause())) + ) + else + cb(ZIO.succeed(VX.vertx(vertxOptions))) + } + } yield vx + + + private def serviceDiscovery: Task[ServiceDiscovery] = + for { + serviceDiscovery <- if (serviceDiscoveryRef.get.nonEmpty) ZIO.attempt(serviceDiscoveryRef.get.get) else ZIO.attempt(ServiceDiscovery.create(vertxRef.get())) + _ = serviceDiscoveryRef.set(Some(serviceDiscovery)) + } yield serviceDiscovery + + + def underlying: UIO[VX] = + ZIO.succeed(vertxRef.get) + + + def subscribe(address: Address, `type`: String, onMessage: String => Task[Unit]): Task[MessageConsumer[String]] = + for { + result <- ZIO.async { (cb: Task[MessageConsumer[String]] => Unit) => + val consumer = vertxRef.get().eventBus.consumer(address, (message: Message[String]) => { + if (message.headers().get("type").equals(`type`)) { + val body = if (message.body() == null) null else new String(Base64.getDecoder.decode(message.body())) + runEffect(onMessage(body)) + }} + ) + consumer.completionHandler((result: AsyncResult[Void]) => + if (result.succeeded()) cb(logger.debug(s"Subscribed to address: $address").as(consumer)) + else cb(logger.error(s"Failed to subscribe to address: $address") *> ZIO.fail(result.cause())) + ) + } + } yield result + + + def unsubscribe(consumer: MessageConsumer[String]): Task[Unit] = + for { + result <- ZIO.async { (cb: Task[Unit] => Unit) => + consumer.unregister((result: AsyncResult[Void]) => + if (result.succeeded()) 
cb(logger.debug(s"Unsubscribed from address: ${consumer.address()}").unit) + else cb(logger.error(s"Failed to unsubscribe from address: ${consumer.address()}") *> ZIO.fail(result.cause())) + ) + } + } yield result + + + def publishMessage(address: Address, `type`: String, message: String): Task[Unit] = + for { + m <- ZIO.attempt(Base64.getEncoder.encode(message.getBytes("UTF-8"))) + _ <- ZIO.attempt(vertxRef.get().eventBus.publish(address, new String(m), new DeliveryOptions().addHeader("type", `type`))) + } yield () + + + def publishMessage(address: Address, `type`: String): Task[Unit] = + ZIO.attempt(vertxRef.get().eventBus.send(address, null, new DeliveryOptions().addHeader("type", `type`))) + + + def sendMessage(address: Address, `type`: String, message: String): Task[Unit] = + for { + m <- ZIO.attempt(Base64.getEncoder.encode(message.getBytes("UTF-8"))) + _ <- ZIO.attempt(vertxRef.get().eventBus.send(address, new String(m), new DeliveryOptions().addHeader("type", `type`))) + } yield () + + + def sendMessage(address: Address, `type`: String): Task[Unit] = + ZIO.attempt(vertxRef.get().eventBus.send(address, null, new DeliveryOptions().addHeader("type", `type`))) + + + def service(name: String): Task[Option[Record]] = + for { + sd <- serviceDiscovery + fn = (record: Record) => Boolean.box(record.getName.equals(name)) + record <- ZIO.async { (cb: Task[Option[Record]] => Unit) => + sd.getRecord(asJavaFunction(fn), (result: AsyncResult[Record]) => + if (result.succeeded()) cb(ZIO.succeed(Option(result.result()))) else cb(ZIO.fail(result.cause())) + ) + } + } yield record + + + def services(filters: Map[String, String]): Task[List[Record]] = + for { + sd <- serviceDiscovery + json = new JsonObject() + _ = filters.foreach { case (k, v) => json.put(k, v) } + record <- ZIO.async { (cb: Task[List[Record]] => Unit) => + sd.getRecords(json, (result: AsyncResult[java.util.List[Record]]) => + if (result.succeeded()) cb(ZIO.succeed(result.result().asScala.toList)) else cb(ZIO.fail(result.cause())) + ) + } + } yield record + + + def registerServiceListener(name: String, onChange: Record => Unit): UIO[Unit] = + ZIO.succeed(serviceDiscoveryListeners.put(name, onChange)) + + + def deregisterServiceListener(name: String): UIO[Unit] = + ZIO.succeed(serviceDiscoveryListeners.remove(name)) + + + def lock(name: String): Task[Lock] = + ZIO.async { (cb: Task[Lock] => Unit) => + vertxRef.get().sharedData().getLock(name, (result: AsyncResult[Lock]) => + if (result.succeeded()) cb(ZIO.succeed(result.result())) else cb(ZIO.fail(result.cause())) + ) + } + + + def lockWithTimeout(name: String, timeoutSeconds: String, onLock: Lock => Task[Unit]): Task[Lock] = + ZIO.async { (cb: Task[Lock] => Unit) => + vertxRef.get().sharedData().getLock(name, (result: AsyncResult[Lock]) => + if (result.succeeded()) cb(ZIO.succeed(result.result())) else cb(ZIO.fail(result.cause())) + ) + } + + + def getCounter(name: String): Task[Counter] = + ZIO.async { (cb: Task[Counter] => Unit) => + vertxRef.get().sharedData().getCounter(name, (result: AsyncResult[Counter]) => + if (result.succeeded()) cb(ZIO.succeed(result.result())) else cb(ZIO.fail(result.cause())) + ) + } + + + private def withMap[K, V, X](name: String, fn: (AsyncMap[K, V], Handler[AsyncResult[X]]) => Unit): Task[X] = + for { + map <- getMap[K, V](name) + result <- ZIO.async { (cb: Task[X] => Unit) => + fn(map, result => if (result.succeeded()) cb(ZIO.succeed(result.result())) else cb(ZIO.fail(result.cause()))) + } + } yield result + + + def clearMap[K, V](name: String): 
Task[Unit] = + withMap[K, V, Void](name, (map, handler) => map.clear(handler)).unit + + + def getMap[K, V](name: String): Task[AsyncMap[K, V]] = + ZIO.async { (cb: Task[AsyncMap[K, V]] => Unit) => + vertxRef.get().sharedData().getAsyncMap[K, V](name, (result: AsyncResult[AsyncMap[K, V]]) => + if (result.succeeded()) cb(ZIO.succeed(result.result())) else cb(ZIO.fail(result.cause())) + ) + } + + + def getMapKeys[K, V](name: String): Task[Set[K]] = + withMap[K, V, java.util.Set[K]](name, (map, handler) => map.keys(handler)).map(_.asScala.toSet) + + + def getMapValues[K, V](name: String): Task[List[V]] = + withMap[K, V, java.util.List[V]](name, (map, handler) => map.values(handler)).map(_.asScala.toList) + + + def getMapValue[K, V](name: String, key: K): Task[Option[V]] = + withMap[K, V, V](name, (map, handler) => map.get(key, handler)).map(Option.apply) + + + def putMapValue[K, V](name: String, key: K, value: V, ttl: Option[Long] = None): Task[Unit] = + withMap[K, V, Void](name, (map, handler) => if (ttl.nonEmpty) map.put(key, value, ttl.get, handler) else map.put(key, value, handler)).unit + + + def removeMapValue[K, V](name: String, key: K): Task[Unit] = + withMap[K, V, Void](name, (map, _) => map.remove(key)).unit + + + def putMapValueIfAbsent[K, V](name: String, key: K, value: V, ttl: Option[Long] = None): Task[V] = + withMap[K, V, V](name, (map, handler) => if (ttl.nonEmpty) map.putIfAbsent(key, value, ttl.get, handler) else map.putIfAbsent(key, value, handler)) + + + def getOrCreateContext: UIO[Context] = + ZIO.succeed(vertxRef.get().getOrCreateContext()) + + + def close: Task[Unit] = + ZIO.attempt(vertxRef.get().close()) + + + def eventBus: UIO[EventBus] = + ZIO.succeed(vertxRef.get().eventBus()) + + + def startHttpServer(domain: String, + proxyDomain: Option[String] = None, + routes: List[Route] = List(), + clustered: Boolean = false, + defaultHandler: Option[RouteHandler] = None, + proxyMapping: Option[RoutingContext => Task[Option[URI]]] = None, + webSocketProxyMapping: Option[WebSocketHeaders => Task[WSURI]] = None, + errorHandlers: Map[Int, RoutingContext => Task[Response]] = Map(), + eventBusInbound: List[String] = List(), + eventBusOutbound: List[String] = List(), + authTypes: List[AuthType] = List(), + additionalAllowedHeaders: Set[String] = Set(), + postLogin: Option[(RoutingContext, Option[UserProfile]) => Task[Response]] = None, + sessionRegexp: Option[String] = None, + jwtKeySet: Option[JsonWebKeySet] = None, + logActivity: Boolean = false): Task[HttpServer] = + for { + useSSL <- config.boolean("http.useSSL", default = false) + publicSSL <- config.boolean("http.publicSSL", default = true) + listenHost <- config.string("http.listenHost", "127.0.0.1") + listenPort <- config.int("http.listenPort", 8082) + publicHost <- config.string("http.publicHost", listenHost) + publicPort <- config.int("http.publicPort", if (publicSSL) 443 else 80) + keyStorePath <- config.optString("http.keyStorePath") + keyStorePassword <- config.optPassword("http.keyStorePassword") + proxyTimeout <- config.long("http.proxyTimeout", 24 * 60 * 60) + uploadsDirectory <- config.path("http.uploadsDirectory", Files.createTempDirectory("harana")) + + publicUrl = if (publicSSL) s"""https://$domain${if (!publicPort.equals(443)) s":$publicPort" else ""}""" else s"""http://$domain${if (!publicPort.equals(80)) s":$publicPort" else ""}""" + + vx <- vertx(clustered) + _ = vertxRef.set(vx) + + router <- ZIO.succeed(Router.router(vx)) + +// FIXME: What is this for ? 
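+// Note (assumption, not verified): pausing the request here would defer reading of the request body
+// until a later handler resumes it, which matters for streaming uploads. The RouteHandler.Stream case
+// below already pauses the request per-route, so this global pause appears redundant and stays disabled.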
+// _ = router.route().handler((rc: RoutingContext) => { +// rc.request().pause() +// rc.next() +// }) + + clusteredStore <- ZIO.attempt(CookieSessionStore.create(vx, "temp")) + sessionStore <- ZIO.attempt(new VertxSessionStore(clusteredStore)) + sessionHandler <- ZIO.attempt(SessionHandler.create(clusteredStore)) + templateEngine <- ZIO.attempt(HandlebarsTemplateEngine.create(vx)) + webClient <- ZIO.attempt(WebClient.create(vx, new WebClientOptions().setFollowRedirects(false).setMaxRedirects(1))) + httpClient <- ZIO.attempt(vx.createHttpClient()) + + _ <- ZIO.attempt { + // Custom Routes + routes.foreach { route => + + def handler(rc: RoutingContext): Unit = + generateResponse(vx, logger, micrometer, templateEngine, uploadsDirectory, rc, route.handler, route.secured) + + route.handler match { + case RouteHandler.Standard(_) | RouteHandler.FileUpload(_) => router.route().handler(BodyHandler.create()) + case RouteHandler.Stream(_) => router.route().handler(rc => rc.request().pause()) + } + + if (route.regex) { + if (route.blocking) + router.routeWithRegex(route.method, route.path).virtualHost(domain).blockingHandler(handler) + else + router.routeWithRegex(route.method, route.path).virtualHost(domain).handler(handler) + } + else { + val customRoute = + if (route.blocking) + router.route(route.method, route.path).virtualHost(domain).blockingHandler(handler).useNormalizedPath(route.normalisedPath) + else + router.route(route.method, route.path).virtualHost(domain).handler(handler).useNormalizedPath(route.normalisedPath) + + if (route.consumes.nonEmpty) customRoute.consumes(route.consumes.get.value) + if (route.produces.nonEmpty) customRoute.produces(route.produces.get.value) + } + } + + // Common + //router.route(HttpMethod.POST, "/eventbus").handler(BodyHandler.create()) + //router.route(HttpMethod.PUT, "/eventbus").handler(BodyHandler.create()) + router.mountSubRouter("/eventbus", Handlers.sock(vx, eventBusInbound, eventBusOutbound)) + router.get("/metrics").handler(PrometheusScrapingHandler.create()) + router.get("/health").handler(rc => { + val response = rc.response.putHeader("content-type", "text/plain") + if (GCHealthCheck.current.isHealthy) + response.setStatusCode(200).end("HEALTHY") + else + response.setStatusCode(503).end("UNHEALTHY") + }) + router.get("/ready").handler(rc => rc.response.putHeader("content-type", "text/plain").setStatusCode(200).end("READY")) + + // Public + // FIXME - Use StaticHandler in Production + router.get("/public/*").handler((rc: RoutingContext) => { + val path = s"${System.getProperty("user.dir")}/src/main/resources${rc.request().uri}" + sendFile(new File(path), vx, rc) + }) + + // CORS + router.route().handler(CorsHandler.create(".*.") + .allowCredentials(true) + .allowedHeaders((defaultAllowedHeaders ++ additionalAllowedHeaders).asJava) + .allowedMethods(defaultAllowedMethods.asJava)) + + // Auth + if (authTypes.nonEmpty) { + val clients = authTypes.map(AuthType.getClient(vx, publicUrl, _)) + val authConfig = new Pac4jConfig(new Clients(publicUrl + "/callback", clients: _*)) + authConfig.setHttpActionAdapter(new VertxHttpActionAdapter()) + + val callbackHandlerOptions = new CallbackHandlerOptions().setDefaultUrl("/postLogin").setMultiProfile(true) + val callbackHandler = new CallbackHandler(vx, sessionStore, authConfig, callbackHandlerOptions) + + if (sessionRegexp.nonEmpty) router.routeWithRegex(sessionRegexp.get).handler(sessionHandler) + router.route.handler(sessionHandler) + + if (jwtKeySet.nonEmpty) 
router.get("/jwks").handler(Handlers.jwks(jwtKeySet.get)) + router.get("/callback").handler(callbackHandler) + router.post("/callback").handler(BodyHandler.create().setMergeFormAttributes(true)) + router.post("/callback").handler(callbackHandler) + router.get("/login").handler(Handlers.loginForm(vx, authConfig, "public/login.hbs", Map())) + router.get("/forceLogin").handler(Handlers.forceLogin(authConfig, sessionStore)) + router.get("/confirm").handler(Handlers.loginForm(vx, authConfig, "public/login.hbs", Map())) + router.get("/logout").handler(new LogoutHandler(vx, sessionStore, new LogoutHandlerOptions(), authConfig)) + router.get("/centralLogout").handler(Handlers.centralLogout(vx, authConfig, sessionStore, publicUrl)) + router.get("/postLogin").handler(rc => { + val profileManager = new VertxProfileManager(new VertxWebContext(rc, sessionStore), sessionStore) + val postLoginHandler = postLogin.get.apply(_, profileManager.getProfile.asScala) + generateResponse(vx, logger, micrometer, templateEngine, uploadsDirectory, rc, RouteHandler.Standard(postLoginHandler)) + }) + } + + // Proxy + if (proxyDomain.nonEmpty && proxyMapping.nonEmpty) { + val client = new WebProxyClient(webClient, WebProxyClientOptions(iFrameAncestors = List(domain, proxyDomain.get))) + router.route().virtualHost(proxyDomain.get).blockingHandler(rc => + runEffect(proxyMapping.get(rc)) match { + case Some(uri) => client.execute(rc, "/*", uri) + case None => rc.response.end() + } + ) + } + + // Errors + router.route.failureHandler((rc: RoutingContext) => { + val response = rc.response + errorHandlers.get(response.getStatusCode) match { + case Some(r) => generateResponse(vx, logger, micrometer, templateEngine, uploadsDirectory, rc, RouteHandler.Standard(r)) + case None => if (!response.closed() && !response.ended()) response.end() + } + }) + + // Default handler + if (defaultHandler.nonEmpty) + router.route.handler(rc => generateResponse(vx, logger, micrometer, templateEngine, uploadsDirectory, rc, defaultHandler.get)) + + router + } + + options <- ZIO.succeed { + var httpServerOptions = new HttpServerOptions() + .setCompressionSupported(true) + .setDecompressionSupported(true) + .setLogActivity(logActivity) + .setHandle100ContinueAutomatically(true) + .setHost(listenHost) + .setMaxHeaderSize(1024 * 16) + .setPort(listenPort) + .setSsl(useSSL) + .setUseAlpn(getVersion >= 9) + + if (keyStorePath.nonEmpty) httpServerOptions = httpServerOptions.setKeyStoreOptions( + new JksOptions().setPath(keyStorePath.get).setPassword(keyStorePassword.get) + ) + + httpServerOptions + } + + httpServer <- ZIO.async { (cb: Task[HttpServer] => Unit) => + vx.createHttpServer(options) + .requestHandler(router) + .webSocketHandler(sourceSocket => { + if (webSocketProxyMapping.nonEmpty && !sourceSocket.uri().startsWith("/eventbus")) { + val target = runEffect(webSocketProxyMapping.get(sourceSocket.headers())) + httpClient.webSocket(target.port, target.host, sourceSocket.uri(), (connection: AsyncResult[WebSocket]) => { + if (connection.succeeded()) { + val targetSocket = connection.result() + syncSockets(sourceSocket, targetSocket) + } else { + logger.warn(s"Failed to connect to backend WS: $target") + } + }) + } + }) + .listen(listenPort, listenHost, (result: AsyncResult[HttpServer]) => + if (result.succeeded()) + cb( + ( + logger.info(s"Started HTTP server on $listenHost:$listenPort") *> + logger.info(s"Routes: ${router.getRoutes.asScala.map(_.getPath).mkString(", ")}") + ).as(result.result()) + ) + else + cb(logger.error(s"Failed to start HTTP 
server on $listenHost:$listenPort") *> ZIO.fail(result.cause())) + ) + } + + } yield httpServer + + + def startNetServer(listenHost: String, listenPort: Int, options: Option[NetServerOptions] = None): Task[NetServer] = + ZIO.async { (cb: Task[NetServer] => Unit) => + vertxRef.get().createNetServer().listen(listenPort, listenHost, (result: AsyncResult[NetServer]) => + if (result.succeeded()) cb(ZIO.attempt(result.result())) else cb(ZIO.fail(result.cause()))) + } + + + def startTcpEventBusServer(listenHost: String, listenPort: Int, inAddressRegex: String, outAddressRegex: String): Task[Unit] = + ZIO.async { (cb: Task[Unit] => Unit) => + TcpEventBusBridge.create(vertxRef.get(), new BridgeOptions() + .addInboundPermitted(new PermittedOptions().setAddressRegex(inAddressRegex)) + .addOutboundPermitted(new PermittedOptions().setAddressRegex(outAddressRegex))) + .listen(listenPort, listenHost, (result: AsyncResult[TcpEventBusBridge]) => + if (result.succeeded()) cb(ZIO.succeed(result.result())) else cb(ZIO.fail(result.cause()))) + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/Vertx.scala b/jvm/src/main/scala/com/harana/modules/vertx/Vertx.scala new file mode 100644 index 0000000..28f1f6c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/Vertx.scala @@ -0,0 +1,72 @@ +package com.harana.modules.vertx + +import com.harana.modules.vertx.models._ +import com.harana.modules.vertx.proxy.WSURI +import io.vertx.core.eventbus.{EventBus, MessageConsumer} +import io.vertx.core.http.HttpServer +import io.vertx.core.net.{NetServer, NetServerOptions} +import io.vertx.core.shareddata.{AsyncMap, Counter, Lock} +import io.vertx.core.{Context, MultiMap, Vertx => VX} +import io.vertx.ext.web.RoutingContext +import io.vertx.servicediscovery.Record +import org.jose4j.jwk.JsonWebKeySet +import org.pac4j.core.profile.UserProfile +import zio.macros.accessible +import zio.{Task, UIO} + +import java.net.URI + +@accessible +trait Vertx { + + def subscribe(address: Address, `type`: String, onMessage: String => Task[Unit]): Task[MessageConsumer[String]] + def unsubscribe(consumer: MessageConsumer[String]): Task[Unit] + def publishMessage(address: Address, messageType: String, payload: String): Task[Unit] + def publishMessage(address: Address, `type`: String): Task[Unit] + def sendMessage(address: Address, `type`: String, message: String): Task[Unit] + def sendMessage(address: Address, `type`: String): Task[Unit] + + def service(name: String): Task[Option[Record]] + def services(filters: Map[String, String]): Task[List[Record]] + def registerServiceListener(name: String, onChange: Record => Unit): UIO[Unit] + def deregisterServiceListener(name: String): UIO[Unit] + + def lock(name: String): Task[Lock] + def lockWithTimeout(name: String, timeoutSeconds: String, onLock: Lock => Task[Unit]): Task[Lock] + + def getCounter(name: String): Task[Counter] + + def clearMap[K, V](name: String): Task[Unit] + def getMap[K, V](name: String): Task[AsyncMap[K, V]] + def getMapKeys[K, V](name: String): Task[Set[K]] + def getMapValues[K, V](name: String): Task[List[V]] + def getMapValue[K, V](name: String, key: K): Task[Option[V]] + def putMapValue[K, V](name: String, key: K, value: V, ttl: Option[Long] = None): Task[Unit] + def removeMapValue[K, V](name: String, key: K): Task[Unit] + def putMapValueIfAbsent[K, V](name: String, key: K, value: V, ttl: Option[Long] = None): Task[V] + + def getOrCreateContext: UIO[Context] + def close: Task[Unit] + + def eventBus: UIO[EventBus] + def 
startHttpServer(domain: String, + proxyDomain: Option[String] = None, + routes: List[Route] = List(), + clustered: Boolean = false, + defaultHandler: Option[RouteHandler] = None, + proxyMapping: Option[RoutingContext => Task[Option[URI]]] = None, + webSocketProxyMapping: Option[WebSocketHeaders => Task[WSURI]] = None, + errorHandlers: Map[Int, RoutingContext => Task[Response]] = Map(), + eventBusInbound: List[String] = List(), + eventBusOutbound: List[String] = List(), + authTypes: List[AuthType] = List(), + additionalAllowedHeaders: Set[String] = Set(), + postLogin: Option[(RoutingContext, Option[UserProfile]) => Task[Response]] = None, + sessionRegexp: Option[String] = None, + jwtKeySet: Option[JsonWebKeySet] = None, + logActivity: Boolean = false): Task[HttpServer] + def startNetServer(listenHost: String, listenPort: Int, options: Option[NetServerOptions] = None): Task[NetServer] + def startTcpEventBusServer(listenHost: String, listenPort: Int, inAddressRegex: String, outAddressRegex: String): Task[Unit] + def underlying: UIO[VX] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/VertxUtils.scala b/jvm/src/main/scala/com/harana/modules/vertx/VertxUtils.scala new file mode 100644 index 0000000..cbc77b9 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/VertxUtils.scala @@ -0,0 +1,37 @@ +package com.harana.modules.vertx + +import io.vertx.core.buffer.Buffer +import io.vertx.core.http.HttpHeaders +import io.vertx.core.streams.Pump +import io.vertx.ext.reactivestreams.ReactiveWriteStream +import io.vertx.ext.web.RoutingContext +import org.reactivestreams.{Subscriber, Subscription} +import zio._ + +import scala.collection.mutable.ArrayBuffer + +object VertxUtils { + + def streamToString(rc: RoutingContext, stream: ReactiveWriteStream[Buffer], streamPump: Pump) = + ZIO.async((cb: Task[String] => Unit) => stream.subscribe(new Subscriber[Buffer] { + val bytes = ArrayBuffer.empty[Byte] + var remaining = rc.request().getHeader(HttpHeaders.CONTENT_LENGTH).toLong + println("Waiting for subscription") + + def onSubscribe(s: Subscription) = { + println("Subscribed to stream .. starting pump") + streamPump.start() + s.request(remaining) + } + def onNext(t: Buffer) = { + bytes.addAll(t.getBytes) + remaining -= t.length() + if (remaining == 0) onComplete() + } + def onError(t: Throwable) = cb(ZIO.succeed(streamPump.stop()) *> ZIO.fail(t)) + def onComplete() = cb({ + println("Completed stream .. 
") + ZIO.succeed(streamPump.stop()) *> ZIO.attempt(new String(bytes.toArray)) + }) + })) +} diff --git a/jvm/src/main/scala/com/harana/modules/vertx/gc/GCDetails.scala b/jvm/src/main/scala/com/harana/modules/vertx/gc/GCDetails.scala new file mode 100644 index 0000000..b66be06 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/gc/GCDetails.scala @@ -0,0 +1,9 @@ +package com.harana.modules.vertx.gc + +case class GCDetails(maxPercent: Int = 0, + gcTimeInPercent: Double = 0.0, + accessTimeMillis: Long = 0L) { + + def isHealthy: Boolean = + gcTimeInPercent <= maxPercent +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/gc/GCHealthCheck.scala b/jvm/src/main/scala/com/harana/modules/vertx/gc/GCHealthCheck.scala new file mode 100644 index 0000000..ceda9b1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/gc/GCHealthCheck.scala @@ -0,0 +1,50 @@ +package com.harana.modules.vertx.gc + +import java.lang.management.ManagementFactory +import scala.jdk.CollectionConverters._ + +object GCHealthCheck { + + private val PERCENT_OF_PERCENT_FACTOR = 10000 + private val PERCENT_FACTOR = 100.0 + + private var lastTimeAccessed = 0L + private var lastCollectionTime = 0L + + private val maxPercent = 20 + + /** + * The HealthCheck does not make sense when calling healthCheck.current() within a very short interval. + * In that case, it will most likely be in failed state (100% GC). + * The checks should be executed with some delay, to get reasonable percentage values. + * + * @return The current garbage collection details + */ + def current: GCDetails = { + val collectionTime = getCollectionTimeMillis + val accessTimeMillis = System.currentTimeMillis + updateTimeStamps(collectionTime, accessTimeMillis) + + GCDetails(maxPercent, getGCTimeInPercent(collectionTime), accessTimeMillis) + } + + private def getGCTimeInPercent(collectionTime: Long): Double = { + if (lastTimeAccessed == 0) return 0 + val timeSinceLastAccessed = System.currentTimeMillis - lastTimeAccessed + if (timeSinceLastAccessed <= 0) return 0 + val thisCollectionTime = collectionTime - lastCollectionTime + val gcTimeInPercentOfPercents = thisCollectionTime * PERCENT_OF_PERCENT_FACTOR / timeSinceLastAccessed + gcTimeInPercentOfPercents / PERCENT_FACTOR + } + + private def updateTimeStamps(collectionTimeMillis: Long, accesTimeMillis: Long): Unit = { + lastCollectionTime = collectionTimeMillis + lastTimeAccessed = accesTimeMillis + } + + private def getCollectionTimeMillis = + ManagementFactory.getGarbageCollectorMXBeans.asScala + .map(_.getCollectionTime) + .filter(_ != -1) + .sum +} diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/AuthType.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/AuthType.scala new file mode 100644 index 0000000..1d5c42d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/AuthType.scala @@ -0,0 +1,181 @@ +package com.harana.modules.vertx.models + +import io.vertx.core.{Vertx => VX} +import org.pac4j.cas.client.CasClient +import org.pac4j.cas.config.CasConfiguration +import org.pac4j.core.client.Client +import org.pac4j.core.credentials.authenticator.Authenticator +import org.pac4j.core.profile.creator.ProfileCreator +import org.pac4j.http.client.direct.{DirectBasicAuthClient, ParameterClient} +import org.pac4j.http.client.indirect.FormClient +import org.pac4j.http.credentials.authenticator.test.SimpleTestUsernamePasswordAuthenticator +import org.pac4j.jwt.config.signature.SecretSignatureConfiguration +import 
org.pac4j.jwt.credentials.authenticator.JwtAuthenticator +import org.pac4j.oauth.client.Google2Client.Google2Scope +import org.pac4j.oauth.client.QQClient.QQScope +import org.pac4j.oauth.client.WechatClient.WechatScope +import org.pac4j.oauth.client.WeiboClient.WeiboScope +import org.pac4j.oauth.client._ +import org.pac4j.oidc.client.OidcClient +import org.pac4j.oidc.config.OidcConfiguration +import org.pac4j.saml.client.SAML2Client +import org.pac4j.saml.config.SAML2Configuration + +import java.io.File +import scala.jdk.CollectionConverters._ + +sealed trait AuthType +object AuthType { + + case class Bitbucket(key: String, secret: String) extends AuthType + case class Dropbox(key: String, secret: String) extends AuthType + case class Facebook(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class Github(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class Google(key: String, secret: String, scope: Option[Google2Scope] = None) extends AuthType + case class HiOrg(key: String, secret: String) extends AuthType + case class Linkedin(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class Odnoklassniki(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class Paypal(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class QQ(key: String, secret: String, scope: List[QQScope] = List()) extends AuthType + case class Strava(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class Twitter(key: String, secret: String, includeEmail: Boolean) extends AuthType + case class Vk(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class Wechat(key: String, secret: String, scopes: List[WechatScope] = List()) extends AuthType + case class Weibo(key: String, secret: String, scope: Option[WeiboScope] = None) extends AuthType + case class WindowsLive(key: String, secret: String) extends AuthType + case class Wordpress(key: String, secret: String) extends AuthType + case class Yahoo(key: String, secret: String) extends AuthType + + case class Basic(authenticator: Option[Authenticator]) extends AuthType + + case class CAS(url: String) extends AuthType + + case class Form(loginFormUrl: String, + authenticator: Authenticator, + profileCreator: ProfileCreator) extends AuthType + + case class JWT(salt: String) extends AuthType + + case class OIDC(clientId: String, + secret: String, + discoveryUri: String, + customParams: Map[String, String] = Map()) extends AuthType + + case class SAML(keystore: File, + keystorePassword: String, + privateKeyPassword: String, + identityProviderMetadataResource: String, + maximumAuthenticationLifetime: Int, + serviceProviderEntityId: String, + serviceProviderMetadata: File) extends AuthType + + def getClient(vx: VX, baseUrl: String, authType: AuthType): Client = + authType match { + case AuthType.Bitbucket(key, secret) => + new BitbucketClient(key, secret) + + case AuthType.Dropbox(key, secret) => + new DropBoxClient(key, secret) + + case AuthType.Facebook(key, secret, scope) => + val client = new FacebookClient(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.Github(key, secret, scope) => + val client = new GitHubClient(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.Google(key, secret, scope) => + val client = new Google2Client(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.HiOrg(key, 
secret) => + new HiOrgServerClient(key, secret) + + case AuthType.Linkedin(key, secret, scope) => + val client = new LinkedIn2Client(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.Odnoklassniki(key, secret, publicKey) => + new OkClient(key, secret, publicKey.orNull) + + case AuthType.Paypal(key, secret, scope) => + val client = new PayPalClient(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.QQ(key, secret, scopes) => + val client = new QQClient(key, secret) + client.setScopes(scopes.asJava) + client + + case AuthType.Strava(key, secret, scope) => + val client = new StravaClient(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.Twitter(key, secret, includeEmail) => + new TwitterClient(key, secret, includeEmail) + + case AuthType.Vk(key, secret, scope) => + val client = new VkClient(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.Wechat(key, secret, scopes) => + val client = new WechatClient(key, secret) + client.setScopes(scopes.asJava) + client + + case AuthType.Weibo(key, secret, scope) => + val client = new WeiboClient(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.WindowsLive(key, secret) => + new WindowsLiveClient(key, secret) + + case AuthType.Wordpress(key, secret) => + new WordPressClient(key, secret) + + case AuthType.Yahoo(key, secret) => + new YahooClient(key, secret) + + case AuthType.Basic(authenticator) => + new DirectBasicAuthClient(authenticator.getOrElse(new SimpleTestUsernamePasswordAuthenticator())) + + case AuthType.CAS(url) => + val cfg = new CasConfiguration(url) +// cfg.setLogoutHandler(new LogoutHandler(vx, new VertxLocalMapStore[String, AnyRef](vx), false)) + new CasClient(cfg) + + case AuthType.Form(loginFormUrl, authenticator, profileCreator) => + new FormClient(loginFormUrl, authenticator, profileCreator) + + case AuthType.JWT(salt) => + val parameterClient = new ParameterClient("token", new JwtAuthenticator(new SecretSignatureConfiguration(salt))) + parameterClient.setSupportGetRequest(true) + parameterClient.setSupportPostRequest(false) + parameterClient + + case AuthType.OIDC(clientId, secret, discoveryUri, customParams) => + val cfg = new OidcConfiguration + cfg.setClientId(clientId) + cfg.setSecret(secret) + cfg.setDiscoveryURI(discoveryUri) + customParams.foreach { case (k, v) => cfg.addCustomParam(k, v)} + new OidcClient(cfg) + + case AuthType.SAML(keystore, keystorePassword, privateKeyPassword, identityProviderMetadataResource, + maximumAuthenticationLifetime, serviceProviderEntityId, serviceProviderMetadata) => + val cfg = new SAML2Configuration(keystore.getAbsolutePath, keystorePassword, privateKeyPassword, identityProviderMetadataResource) + cfg.setMaximumAuthenticationLifetime(maximumAuthenticationLifetime) + cfg.setServiceProviderEntityId(serviceProviderEntityId) + cfg.setServiceProviderMetadataPath(serviceProviderMetadata.getAbsolutePath) + new SAML2Client(cfg) + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/ContentType.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/ContentType.scala new file mode 100644 index 0000000..f2209ab --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/ContentType.scala @@ -0,0 +1,15 @@ +package com.harana.modules.vertx.models + +final case class ContentType(value: String) extends AnyVal { + override def toString: String = value +} + +object ContentType { + val Plain = ContentType("text/plain") + val HTML = 
ContentType("text/html") + val CSV = ContentType("text/csv") + val XML = ContentType("text/xml") + val JSON = ContentType("application/json") + val OctetStream = ContentType("application/octet-stream") + val Form = ContentType("application/x-www-form-urlencoded") +} diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/CrossOriginResourceSharing.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/CrossOriginResourceSharing.scala new file mode 100644 index 0000000..e0b2885 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/CrossOriginResourceSharing.scala @@ -0,0 +1,52 @@ +package com.harana.modules.vertx.models + +import com.google.common.base.{Joiner, Splitter} +import com.harana.modules.vertx.models.CrossOriginResourceSharing._ + +import java.util.regex.Pattern +import scala.jdk.CollectionConverters._ + +case class CrossOriginResourceSharing(allowedOrigins: Set[String], + allowedMethods: Set[String], + allowedHeaders: Set[String]) { + + val allowedHeadersRaw = Joiner.on(HEADER_VALUE_SEPARATOR).join(allowedHeaders.asJava) + val allowedMethodsRaw = Joiner.on(HEADER_VALUE_SEPARATOR).join(allowedMethods.asJava) + val anyOriginAllowed = allowedOrigins.contains(ALLOW_ANY_ORIGIN) + val allowedOriginPatterns = allowedOrigins.filterNot(_.equals(ALLOW_ANY_ORIGIN)).map(Pattern.compile(_, Pattern.CASE_INSENSITIVE)) + + def getAllowedMethods = + allowedMethodsRaw + + def getAllowedOrigin(origin: String) = + if (anyOriginAllowed) ALLOW_ANY_ORIGIN else origin + + def isOriginAllowed(origin: String): Boolean = + anyOriginAllowed || allowedOriginPatterns.forall(_.matcher(origin).matches) + + def isMethodAllowed(method: String) = + allowedMethods.contains(method) + + def isEveryHeaderAllowed(headers: String) = + allowedHeadersRaw.equals(ALLOW_ANY_HEADER) || Splitter.on(HEADER_VALUE_SEPARATOR).split(headers).asScala.forall(allowedHeaders.contains) + + override def equals(obj: Any): Boolean = { + if (this == obj) return true + if (obj == null || !obj.isInstanceOf[CrossOriginResourceSharing]) return false + + val that = obj.asInstanceOf[CrossOriginResourceSharing] + this.allowedOrigins.equals(that.allowedOrigins) && + this.allowedMethodsRaw.equals(that.allowedMethodsRaw) && + this.allowedHeadersRaw.equals(that.allowedHeadersRaw) + } +} + +object CrossOriginResourceSharing { + val SUPPORTED_METHODS = Set("GET", "HEAD", "PUT", "POST") + val HEADER_VALUE_SEPARATOR = ", " + val ALLOW_ANY_ORIGIN = "*" + val ALLOW_ANY_HEADER = "*" + + def apply() = + new CrossOriginResourceSharing(Set(ALLOW_ANY_ORIGIN), SUPPORTED_METHODS, Set(ALLOW_ANY_HEADER)) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/Response.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/Response.scala new file mode 100644 index 0000000..b0e326d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/Response.scala @@ -0,0 +1,86 @@ +package com.harana.modules.vertx.models + +import io.circe.Json +import io.vertx.core.buffer.{Buffer => VertxBuffer} +import io.vertx.core.http.Cookie +import io.vertx.core.streams.{ReadStream => VertxReadStream} + +sealed trait Response { + val contentType: Option[ContentType] + val cookies: List[Cookie] + val statusCode: Option[Int] + val headers: Map[_<: CharSequence, List[_<: CharSequence]] +} + +object Response { + + case class Buffer(buffer: VertxBuffer, + gzipped: Boolean = false, + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = 
false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class Content(content: String, + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class Empty(contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class File(filename: String, + inputStream: java.io.InputStream, + gzipped: Boolean = false, + contentSize: Option[Long] = None, + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class InputStream(inputStream: java.io.InputStream, + gzipped: Boolean = false, + contentSize: Option[Long] = None, + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class JSON(content: Json, + contentType: Option[ContentType] = Some(ContentType.JSON), + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class ReadStream(stream: VertxReadStream[VertxBuffer], + contentSize: Option[Long] = None, + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class Redirect(url: String, + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class Template(path: String, + parameters: Map[String, AnyRef] = Map(), + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/Route.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/Route.scala new file mode 100644 index 0000000..c237241 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/Route.scala @@ -0,0 +1,28 @@ +package com.harana.modules.vertx.models + +import io.vertx.core.buffer.Buffer +import io.vertx.core.http.HttpMethod +import io.vertx.core.streams.Pump +import io.vertx.ext.reactivestreams.ReactiveWriteStream +import io.vertx.ext.web.{RoutingContext, FileUpload => VertxFileUpload} +import zio.Task + +import java.nio.file.Path + +case class Route(path: String, + method: HttpMethod, + handler: RouteHandler, + consumes: Option[ContentType] = None, + produces: Option[ContentType] = Some(ContentType.HTML), + multipart: Boolean = false, + secured: Boolean = false, + regex: Boolean = false, + normalisedPath: Boolean = true, + blocking: Boolean = false) + +sealed trait RouteHandler +object RouteHandler { + case class Standard(handler: RoutingContext => Task[Response]) extends RouteHandler + case 
class FileUpload(handler: (RoutingContext, Path, List[VertxFileUpload]) => Task[Response]) extends RouteHandler + case class Stream(handler: (RoutingContext, ReactiveWriteStream[Buffer], Pump) => Task[Response]) extends RouteHandler +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/Verticle.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/Verticle.scala new file mode 100644 index 0000000..20f072b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/Verticle.scala @@ -0,0 +1,27 @@ +package com.harana.modules.vertx.models + +import io.vertx.core.{AbstractVerticle, Promise} +import zio._ + +// FIXME +//trait Verticle extends AbstractVerticle { +// +// def run: ZIO[Nothing, Nothing, Int] +// +// override def start(startPromise: Promise[Void]): Unit = { +// Unsafe.unsafe { implicit unsafe => +// Runtime.default.unsafe.run( +// (for { +// fiber <- run.fork +// _ <- ZIO.succeed(java.lang.Runtime.getRuntime.addShutdownHook(new Thread { +// override def run() = { +// val _ = Runtime.default.unsafe.run(fiber.interrupt) +// } +// })) +// result <- fiber.join +// _ <- fiber.interrupt +// } yield result) +// ) +// } +// } +//} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AsyncFileReadStream.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AsyncFileReadStream.scala new file mode 100644 index 0000000..a4cf9fc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AsyncFileReadStream.scala @@ -0,0 +1,144 @@ +package com.harana.modules.vertx.models.streams + +import io.vertx.core.Handler +import io.vertx.core.buffer.Buffer +import io.vertx.core.streams.ReadStream +import one.jasyncfio.{AsyncFile, EventExecutor} +import org.apache.commons.lang3.SystemUtils + +import java.nio.ByteBuffer +import java.nio.channels.{AsynchronousFileChannel, CompletionHandler} +import java.nio.file.{Paths, StandardOpenOption} +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.Promise +import scala.util.Try + +class AsyncFileReadStream(path: String, range: Option[(Long, Long)] = None) extends ReadStream[Buffer] { + + val file = + if (SystemUtils.IS_OS_LINUX) + Left(AsyncFile.open(path, EventExecutor.initDefault()).get()) + else + Right(AsynchronousFileChannel.open(Paths.get(path), StandardOpenOption.READ)) + + var closed = false + var readPos = if (range.nonEmpty) range.get._1 else 0L + val readBufferSize = 1024 + var readLength = if (range.nonEmpty) (range.get._2 - range.get._1)+1 else Long.MaxValue + + var handler: Option[Handler[Buffer]] = None + var exceptionHandler: Option[Handler[Throwable]] = None + var endHandler: Option[Handler[Void]] = None + + var queue = new InboundBuffer[Buffer](0) + queue.drainHandler(_ => doRead(ByteBuffer.allocateDirect(readBufferSize))) + + queue.handler { buff => + if (buff.length() > 0) { + if (this.handler.nonEmpty) this.handler.get.handle(buff) + } else { + if (this.endHandler.nonEmpty) this.endHandler.get.handle(null) + } + } + + def doRead(bb: ByteBuffer): Unit = { + val buff = Buffer.buffer(readBufferSize) + val readSize = Math.min(readBufferSize, readLength).toInt + bb.limit(readSize) + val promise = Promise[Buffer]() + promise.future.onComplete { (ar: Try[Buffer]) => { + if (ar.isSuccess) { + val buffer = ar.get + readPos += buffer.length() + readLength -= buffer.length() + if (buffer.length == 0) { + if (this.endHandler.nonEmpty) { + this.endHandler.get.handle(null) + } + } else + if 
(queue.write(buffer)) doRead(bb) + } else + if (this.exceptionHandler.nonEmpty) this.exceptionHandler.get.handle(ar.failed.get) + }} + + read(buff, 0, bb, readPos, promise) + } + + def read(writeBuff: Buffer, offset: Int, buff: ByteBuffer, position: Long, promise: Promise[Buffer]): Unit = + file match { + case Left(asyncFile) => + val tempBuffer = ByteBuffer.allocateDirect(readBufferSize) + var read = position + + while (read != 1 && buff.hasRemaining) { + read = asyncFile.read(tempBuffer, read, readBufferSize).get().toLong + promise.success(Buffer.buffer(tempBuffer.array())) + } + + case Right(channel) => + channel.read(buff, position, null, new CompletionHandler[Integer, Object]() { + var pos = position + + def completed(bytesRead: Integer, attachment: Object) = + if (bytesRead == -1) + done() + else { + if (buff.hasRemaining) { + pos += bytesRead + read(writeBuff, offset, buff, pos, promise) + } else + done() + } + + def failed(t: Throwable, attachment: Object) = { + t.printStackTrace() + promise.failure(t) + } + + def done() = { + buff.flip() + writeBuff.setBytes(offset, buff) + buff.compact() + promise.success(writeBuff) + } + }) + } + + def handler(handler: Handler[Buffer]) = { + if (closed) + this + else { + this.handler = Option(handler) + if (this.handler.nonEmpty) + doRead(ByteBuffer.allocateDirect(readBufferSize)) + else + queue.clear() + } + this + } + + def pause() = { + queue.pause() + this + } + + def resume() = { + if (!closed) queue.resume() + this + } + + def fetch(amount: Long) = { + queue.fetch(amount) + this + } + + def exceptionHandler(handler: Handler[Throwable]) = { + this.exceptionHandler = Option(handler) + this + } + + def endHandler(handler: Handler[Void]) = { + this.endHandler = Some(handler) + this + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AwsChunkedReadStream.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AwsChunkedReadStream.java new file mode 100644 index 0000000..793584a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AwsChunkedReadStream.java @@ -0,0 +1,84 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.core.Handler; +import io.vertx.core.buffer.Buffer; +import io.vertx.core.logging.Logger; +import io.vertx.core.logging.LoggerFactory; +import io.vertx.core.streams.ReadStream; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +public class AwsChunkedReadStream extends DelegateReadStream { + private static final Logger log = LoggerFactory.getLogger(AwsChunkedReadStream.class); + + private static final byte[] CRLF = "\r\n".getBytes(StandardCharsets.UTF_8); + private static final byte[] DELIMITER = ";".getBytes(StandardCharsets.UTF_8); + + private int headerEndPos = -1; + private int chunkEndPos = -1; + private Handler exceptionHandler; + private Handler endHandler; + private Buffer buffer = Buffer.buffer(); + + public AwsChunkedReadStream(ReadStream delegate) { + super(delegate); + } + + @Override + public AwsChunkedReadStream handler(Handler handler) { + if (handler == null) { + delegate.handler(null); + return this; + } + + delegate.handler(data -> { + try { + buffer.appendBuffer(data); + + if (headerEndPos == -1) { + int delimiterPos = countUntil(buffer, DELIMITER, 0) + DELIMITER.length; + headerEndPos = countUntil(buffer, CRLF, delimiterPos) + CRLF.length; + } + + if (headerEndPos > 0 && chunkEndPos == -1) { + chunkEndPos = countUntil(buffer, CRLF, headerEndPos); + } + + if (headerEndPos > 0 && 
chunkEndPos > 0) { + handler.handle(buffer.slice(headerEndPos + 1, chunkEndPos)); + if (endHandler != null) endHandler.handle(null); + } + + } catch(Exception e) { + if (exceptionHandler != null) + exceptionHandler.handle(e); + else + log.error("Unhandled exception", e); + } + }); + return this; + } + + @Override + public AwsChunkedReadStream endHandler(Handler handler) { + this.endHandler = handler; + return this; + } + + + @Override + public AwsChunkedReadStream exceptionHandler(Handler handler) { + exceptionHandler = handler; + delegate.exceptionHandler(handler); + return this; + } + + private int countUntil(Buffer data, byte[] sequence, int start) { + for (int i=start ; (i + sequence.length) < data.length() ; i++) { + var bytes = data.getBytes(i, i + sequence.length); + if (Arrays.equals(bytes, sequence)) return i; + } + return -1; + } +} diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AwsChunkedWriteStream.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AwsChunkedWriteStream.java new file mode 100644 index 0000000..02f5585 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AwsChunkedWriteStream.java @@ -0,0 +1,119 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.core.AsyncResult; +import io.vertx.core.Future; +import io.vertx.core.Handler; +import io.vertx.core.buffer.Buffer; +import io.vertx.core.logging.Logger; +import io.vertx.core.logging.LoggerFactory; +import io.vertx.ext.reactivestreams.ReactiveWriteStream; +import org.reactivestreams.Subscriber; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +public class AwsChunkedWriteStream implements ReactiveWriteStream { + private static final Logger log = LoggerFactory.getLogger(AwsChunkedReadStream.class); + + private ReactiveWriteStream delegate; + + private int headerEndPos = -1; + private int chunkEndPos = -1; + + private static final byte[] CRLF = "\r\n".getBytes(StandardCharsets.UTF_8); + private static final byte[] DELIMITER = ";".getBytes(StandardCharsets.UTF_8); + + private Handler exceptionHandler; + + private Buffer buffer = Buffer.buffer(); + + public AwsChunkedWriteStream(ReactiveWriteStream delegate) { + this.delegate = delegate; + } + + @Override + public Future write(Buffer data) { + Buffer payload = availableData(data); + return delegate.write(payload); + } + + @Override + public void write(Buffer data, Handler> handler) { + Buffer payload = availableData(data); + delegate.write(payload, handler); + } + + @Override + public ReactiveWriteStream setWriteQueueMaxSize(int maxSize) { + return delegate.setWriteQueueMaxSize(maxSize); + } + + @Override + public boolean writeQueueFull() { + return delegate.writeQueueFull(); + } + + @Override + public ReactiveWriteStream drainHandler(Handler handler) { + delegate.drainHandler(handler); + return this; + } + + @Override + public void end(Handler> handler) { + delegate.end(handler); + } + + @Override + public ReactiveWriteStream close() { + delegate.close(); + return this; + } + + @Override + public void subscribe(Subscriber s) { + delegate.subscribe(s); + } + + @Override + public AwsChunkedWriteStream exceptionHandler(Handler handler) { + exceptionHandler = handler; + delegate.exceptionHandler(handler); + return this; + } + + private Buffer availableData(Buffer data) { + try { + buffer.appendBuffer(data); + + if (headerEndPos == -1) { + int delimiterPos = countUntil(buffer, DELIMITER, 0) + DELIMITER.length; + headerEndPos = countUntil(buffer, CRLF, delimiterPos) + 
CRLF.length; + } + + if (headerEndPos > 0 && chunkEndPos == -1) { + chunkEndPos = countUntil(buffer, CRLF, headerEndPos); + } + + if (headerEndPos > 0 && chunkEndPos > 0) { + System.err.println("Writing: " + headerEndPos + " -- " + chunkEndPos); + return buffer.slice(headerEndPos + 1, chunkEndPos); + } + + } catch(Exception e) { + if (exceptionHandler != null) + exceptionHandler.handle(e); + else + log.error("Unhandled exception", e); + } + return null; + } + + private int countUntil(Buffer data, byte[] sequence, int start) { + for (int i=start ; (i + sequence.length) < data.length() ; i++) { + var bytes = data.getBytes(i, i + sequence.length); + if (Arrays.equals(bytes, sequence)) return i; + } + return -1; + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/BufferReadStream.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/BufferReadStream.java new file mode 100644 index 0000000..83dd687 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/BufferReadStream.java @@ -0,0 +1,82 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.core.Handler; +import io.vertx.core.buffer.Buffer; +import io.vertx.core.streams.ReadStream; + +/** + * A {@link ReadStream} that reads from a buffer + * @author Michel Kraemer + */ +public class BufferReadStream implements ReadStream { + private Buffer buf; + private boolean paused; + private Handler handler; + private Handler endHandler; + + /** + * Constructs a stream + * @param buf the buffer to read from + */ + public BufferReadStream(Buffer buf) { + this.buf = buf; + } + + @Override + public ReadStream exceptionHandler(Handler handler) { + // exceptions can never happen + return this; + } + + private void doWrite(Handler handler) { + handler.handle(buf); + buf = null; + if (endHandler != null) { + endHandler.handle(null); + } + } + + @Override + public ReadStream handler(Handler handler) { + if (paused) { + this.handler = handler; + } else { + doWrite(handler); + } + return this; + } + + @Override + public ReadStream pause() { + this.paused = true; + return this; + } + + @Override + public ReadStream resume() { + if (paused) { + paused = false; + if (handler != null) { + Handler h = handler; + handler = null; + doWrite(h); + } + } + return this; + } + + @Override + public ReadStream fetch(long amount) { + return resume(); + } + + @Override + public ReadStream endHandler(Handler endHandler) { + if (buf == null) { + endHandler.handle(null); + } else { + this.endHandler = endHandler; + } + return this; + } +} diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/DelegateChunkReadStream.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/DelegateChunkReadStream.java new file mode 100644 index 0000000..1a169e2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/DelegateChunkReadStream.java @@ -0,0 +1,44 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.core.AsyncResult; +import io.vertx.core.Future; +import io.vertx.core.Handler; +import io.vertx.core.buffer.Buffer; +import io.vertx.core.streams.ReadStream; + +/** + * A simple read stream for chunks. Wraps around another read stream that + * doesn't need to be closed or is closed by the caller. 
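+ * <p>Illustrative usage only (an assumed sketch, not part of the original class): wrap an in-memory chunk and consume it as a read stream: + * <pre>{@code + * Buffer chunk = Buffer.buffer("hello");                                // chunk to serve + * DelegateChunkReadStream stream = new DelegateChunkReadStream(chunk); // size is chunk.length() + * stream.handler(buf -> System.out.println(buf.toString()));           // receives the whole chunk + * stream.endHandler(v -> System.out.println("done"));                  // fires once the buffer has been delivered + * }</pre>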
+ * @author Michel Kraemer + */ +public class DelegateChunkReadStream extends DelegateReadStream { + private final long size; + + /** + * Constructs a new read stream + * @param size the chunk's size + * @param delegate the underlying read stream + */ + public DelegateChunkReadStream(long size, ReadStream delegate) { + super(delegate); + this.size = size; + } + + /** + * Create a new read stream from a chunk + * @param chunk the chunk + */ + public DelegateChunkReadStream(Buffer chunk) { + this(chunk.length(), new BufferReadStream(chunk)); + } + + public long getSize() { + return size; + } + + public void close(Handler> handler) { + if (handler != null) { + handler.handle(Future.succeededFuture()); + } + } +} diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/DelegateReadStream.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/DelegateReadStream.java new file mode 100644 index 0000000..9c4a8f9 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/DelegateReadStream.java @@ -0,0 +1,57 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.core.Handler; +import io.vertx.core.streams.ReadStream; + +/** + * A ReadStream that delegates to another one + * @author Michel Kraemer + * @param the type of the objects that can be read from the stream + */ +public class DelegateReadStream implements ReadStream { + protected final ReadStream delegate; + + /** + * Constructs a new read stream + * @param delegate the stream to delegate to + */ + public DelegateReadStream(ReadStream delegate) { + this.delegate = delegate; + } + + @Override + public ReadStream exceptionHandler(Handler handler) { + delegate.exceptionHandler(handler); + return this; + } + + @Override + public ReadStream handler(Handler handler) { + delegate.handler(handler); + return this; + } + + @Override + public ReadStream pause() { + delegate.pause(); + return this; + } + + @Override + public ReadStream resume() { + delegate.resume(); + return this; + } + + @Override + public ReadStream fetch(long amount) { + delegate.fetch(amount); + return this; + } + + @Override + public ReadStream endHandler(Handler endHandler) { + delegate.endHandler(endHandler); + return this; + } +} diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/DelegateWriteStream.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/DelegateWriteStream.java new file mode 100644 index 0000000..163580a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/DelegateWriteStream.java @@ -0,0 +1,53 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.codegen.annotations.Nullable; +import io.vertx.core.AsyncResult; +import io.vertx.core.Future; +import io.vertx.core.Handler; +import io.vertx.core.streams.WriteStream; + +public class DelegateWriteStream implements WriteStream { + + protected final WriteStream delegate; + + public DelegateWriteStream(WriteStream delegate) { + this.delegate = delegate; + } + + @Override + public WriteStream exceptionHandler(@Nullable Handler handler) { + delegate.exceptionHandler(handler); + return this; + } + + @Override + public Future write(T data) { + return delegate.write(data); + } + + @Override + public void write(T data, Handler> handler) { + delegate.write(data, handler); + } + + @Override + public void end(Handler> handler) { + delegate.end(handler); + } + + @Override + public WriteStream setWriteQueueMaxSize(int maxSize) { + return delegate.setWriteQueueMaxSize(maxSize); + } + + @Override 
+ public boolean writeQueueFull() { + return delegate.writeQueueFull(); + } + + @Override + public WriteStream drainHandler(@Nullable Handler handler) { + delegate.drainHandler(handler); + return this; + } +} diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/FilteringStream.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/FilteringStream.java new file mode 100644 index 0000000..5b8f4aa --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/FilteringStream.java @@ -0,0 +1,201 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.core.Context; +import io.vertx.core.Handler; +import io.vertx.core.Vertx; +import io.vertx.core.streams.ReadStream; +import io.vertx.core.streams.impl.InboundBuffer; + +import java.util.Objects; +import java.util.function.Predicate; + +/** + * @author Thomas Segismont + */ +public final class FilteringStream implements ReadStream { + + private final ReadStream source; + private final Predicate predicate; + private final Context context; + private final InboundBuffer queue; + + private boolean ended; + private Throwable error; + private boolean stopped; + private int inFlight; + private Handler handler; + private Handler exceptionHandler; + private Handler endHandler; + + public FilteringStream(ReadStream source, Predicate predicate) { + this(source, predicate, Vertx.currentContext()); + } + + public FilteringStream(ReadStream source, Predicate predicate, Vertx vertx) { + this(source, predicate, vertx.getOrCreateContext()); + } + + public FilteringStream(ReadStream source, Predicate predicate, Context context) { + Objects.requireNonNull(source, "Source cannot be null"); + Objects.requireNonNull(predicate, "Filtering function cannot be null"); + Objects.requireNonNull(context, "Context cannot be null"); + this.source = source; + this.predicate = predicate; + this.context = context; + queue = new InboundBuffer(context) + .exceptionHandler(throwable -> notifyTerminalHandler(getExceptionHandler(), throwable)) + .drainHandler(v -> source.resume()); + } + + @Override + public ReadStream exceptionHandler(Handler handler) { + synchronized (queue) { + if (!stopped) { + exceptionHandler = handler; + } + } + return this; + } + + private synchronized Handler getExceptionHandler() { + return exceptionHandler; + } + + @Override + public ReadStream handler(Handler handler) { + synchronized (queue) { + if (stopped) { + return this; + } + } + if (handler == null) { + notifyTerminalHandler(getEndHandler(), null); + return this; + } + this.handler = handler; + queue.handler(item -> handleOnContext(this::emit, item)); + source + .handler(item -> handleOnContext(this::filter, item)) + .exceptionHandler(throwable -> handleOnContext(this::error, throwable)) + .endHandler(v -> handleOnContext(this::exhausted, null)); + return this; + } + + private void error(Throwable t) { + boolean terminate; + synchronized (queue) { + this.error = t; + terminate = !stopped && inFlight == 0; + } + if (terminate) { + notifyTerminalHandler(getExceptionHandler(), t); + } + } + + private void exhausted(T item) { + boolean terminate; + synchronized (queue) { + ended = true; + terminate = !stopped && inFlight == 0; + } + if (terminate) { + notifyTerminalHandler(getEndHandler(), null); + } + } + + private void filter(T item) { + if (predicate.test(item)) { + synchronized (queue) { + inFlight++; + } + if (!queue.write(item)) { + source.pause(); + } + } + } + + private void emit(T item) { + int terminate = 0; + Handler h; + 
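// Under the queue lock, decide whether to deliver this item and whether the stream has just terminated (1 = ended cleanly, 2 = failed); the handlers themselves are invoked outside the lock. +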
synchronized (queue) { + inFlight--; + h = stopped ? null : handler; + terminate = (stopped || inFlight > 0) ? 0 : (error != null ? 2 : (ended ? 1 : 0)); + } + if (h != null) { + h.handle(item); + } + if (terminate == 1) { + notifyTerminalHandler(getEndHandler(), null); + } else if (terminate == 2) { + notifyTerminalHandler(getExceptionHandler(), error); + } + } + + private void handleOnContext(Handler handler, U value) { + if (context == Vertx.currentContext()) { + handler.handle(value); + } else { + context.runOnContext(v -> handler.handle(value)); + } + } + + @Override + public ReadStream pause() { + synchronized (queue) { + if (!stopped) { + queue.pause(); + } + } + return this; + } + + @Override + public ReadStream resume() { + return fetch(Long.MAX_VALUE); + } + + @Override + public ReadStream fetch(long amount) { + synchronized (queue) { + if (!stopped) { + queue.fetch(amount); + } + } + return this; + } + + @Override + public ReadStream endHandler(Handler endHandler) { + synchronized (queue) { + if (!stopped) { + this.endHandler = endHandler; + } + } + return this; + } + + private synchronized Handler getEndHandler() { + return endHandler; + } + + private void notifyTerminalHandler(Handler handler, V value) { + Handler h; + synchronized (queue) { + if (!stopped) { + stopped = true; + queue.handler(null).drainHandler(null); + h = handler; + } else { + h = null; + } + } + if (h != null) { + if (context != Vertx.currentContext()) { + context.runOnContext(v -> h.handle(value)); + } else { + h.handle(value); + } + } + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/GzipReadStream.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/GzipReadStream.java new file mode 100644 index 0000000..dfd7c2a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/GzipReadStream.java @@ -0,0 +1,281 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.core.Handler; +import io.vertx.core.Vertx; +import io.vertx.core.buffer.Buffer; +import io.vertx.core.logging.Logger; +import io.vertx.core.logging.LoggerFactory; +import io.vertx.core.streams.ReadStream; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.zip.*; + +/** + *
A {@link ReadStream} that delegates to another one but decompresses all + * data with GZIP. + * + * The code is loosely based on {@link java.util.zip.GZIPInputStream}
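+ * <p>Illustrative usage only (an assumed sketch, not from the original sources; {@code data.json.gz} is a hypothetical input file). The class inflates via {@code executeBlocking} on the current Vert.x context, so the sketch runs on a context: + * <pre>{@code + * Vertx vertx = Vertx.vertx(); + * Buffer gzipped = vertx.fileSystem().readFileBlocking("data.json.gz"); + * vertx.runOnContext(v -> { + *   GzipReadStream gunzip = new GzipReadStream(new BufferReadStream(gzipped)); + *   gunzip.exceptionHandler(Throwable::printStackTrace); + *   gunzip.endHandler(done -> System.out.println("inflated")); + *   gunzip.handler(buf -> System.out.print(buf.toString()));  // registering the handler starts the flow + * }); + * }</pre>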
    + * @author Michel Kraemer + */ +public class GzipReadStream extends DelegateReadStream { + private static final Logger log = LoggerFactory.getLogger(GzipReadStream.class); + + private final static int FHCRC = 2; // Header CRC + private final static int FEXTRA = 4; // Extra field + private final static int FNAME = 8; // File name + private final static int FCOMMENT = 16; // File comment + + private final Inflater inflater; + private final CRC32 crc; + private final byte[] buf; + private Handler exceptionHandler; + private Handler endHandler; + private boolean headerRead; + private Buffer headerBuffer; + private Buffer trailerBuffer; + + /** + * Creates new stream that wraps around another one + * @param delegate the stream to wrap around + */ + public GzipReadStream(ReadStream delegate) { + super(delegate); + inflater = new Inflater(true); + crc = new CRC32(); + buf = new byte[512]; + } + + @Override + public GzipReadStream exceptionHandler(Handler handler) { + exceptionHandler = handler; + delegate.exceptionHandler(handler); + return this; + } + + private void handleException(Throwable t) { + if (exceptionHandler != null && t instanceof Exception) { + exceptionHandler.handle(t); + } else { + log.error("Unhandled exception", t); + } + } + + @Override + public GzipReadStream handler(Handler handler) { + if (handler == null) { + delegate.handler(null); + return this; + } + + delegate.handler(data -> { + int start = 0; + + if (!headerRead) { + // append buf to data from previous calls (if there is any) + if (headerBuffer != null) { + headerBuffer.appendBuffer(data); + data = headerBuffer; + } + + // try to parse header + int headerSize = tryParseHeader(data); + if (headerSize > 0) { + // header was parsed successfully. skip bytes. + headerRead = true; + start = headerSize; + headerBuffer = null; + } else { + // save buf for next call + if (headerBuffer == null) { + headerBuffer = Buffer.buffer(); + headerBuffer.appendBuffer(data); + } + return; // wait for next call + } + } + + final int finalStart = start; + final Buffer finalData = data; + Vertx.currentContext().>executeBlocking(f -> { + if (inflater.finished()) { + // inflater does not want any more data - just forward it + f.complete(Pair.of(null, finalData)); + return; + } + + byte[] currentData = finalData.getBytes(finalStart, finalData.length()); + try { + Buffer r = null; + Buffer remainingBytes = null; + while (true) { + int n; + while ((n = inflater.inflate(buf, 0, buf.length)) == 0) { + if (inflater.finished() || inflater.needsDictionary()) { + int remaining = inflater.getRemaining(); + if (remaining > 0) { + // save remaining bytes to parse the trailer + remainingBytes = finalData.getBuffer(finalData.length() - remaining, + finalData.length()); + } + break; + } + if (inflater.needsInput()) { + if (currentData != null) { + inflater.setInput(currentData, 0, currentData.length); + currentData = null; + } else { + // wait for more data + break; + } + } + } + if (n == 0) { + break; + } + crc.update(buf, 0, n); + if (r == null) { + r = Buffer.buffer(); + } + r.appendBytes(buf, 0, n); + } + f.complete(Pair.of(r, remainingBytes)); + } catch (DataFormatException e) { + f.fail(e); + } + }, ar -> { + if (ar.failed()) { + handleException(ar.cause()); + return; + } + + // forward uncompressed data + Pair r = ar.result(); + Buffer b = r.getLeft(); + if (b != null && b.length() > 0) { + handler.handle(b); + } + + // initialize trailerBuffer to collect remaining bytes if + // inflater has finished + if (r.getRight() != null) { + if 
(trailerBuffer != null) { + trailerBuffer.appendBuffer(r.getRight()); + } else { + trailerBuffer = r.getRight(); + } + } + + // try to parse trailer if we're already at the end of the file + if (trailerBuffer != null) { + tryParseTrailer(); + } + }); + }); + + return this; + } + + /** + * Try to parse a GZIP header from the given buffer + * @param buf the buffer to parse + * @return the size of the header or 0 if the buffer was not large enough + * or invalid + */ + private int tryParseHeader(Buffer buf) { + // check if the header is large enough for mandatory fields + if (buf.length() < 10) { + return 0; + } + + // Check header magic + if (buf.getUnsignedShortLE(0) != GZIPInputStream.GZIP_MAGIC) { + handleException(new ZipException("Not in GZIP format")); + return 0; + } + + // Check compression method + if (buf.getByte(2) != 8) { + handleException(new ZipException("Unsupported compression method")); + return 0; + } + + // Read flags + int flg = buf.getByte(3); + + int n = 2 + 2 + 6; + + // Skip optional extra field + if ((flg & FEXTRA) == FEXTRA) { + if (buf.length() < n + 2) { + return 0; + } + int m = buf.getUnsignedShortLE(n); + n += m + 2; + } + + // Skip optional file name + if ((flg & FNAME) == FNAME) { + do { + if (buf.length() <= n) { + return 0; + } + n++; + } while (buf.getByte(n - 1) != 0); + } + + // Skip optional file comment + if ((flg & FCOMMENT) == FCOMMENT) { + do { + if (buf.length() <= n) { + return 0; + } + n++; + } while (buf.getByte(n - 1) != 0); + } + + // Check optional header CRC + if ((flg & FHCRC) == FHCRC) { + if (buf.length() < n + 2) { + return 0; + } + crc.reset(); + crc.update(buf.getBytes(0, n)); + int v = (int)crc.getValue() & 0xffff; + if (buf.getUnsignedShortLE(n) != v) { + handleException(new ZipException("Corrupt GZIP header")); + return 0; + } + n += 2; + crc.reset(); + } + + return n; + } + + /** + * Try to parse the trailer and call {@link #endHandler} if successful + */ + private void tryParseTrailer() { + if (trailerBuffer.length() < 8) { + // wait for more data + return; + } + + long v = trailerBuffer.getUnsignedIntLE(0); + long bytesWritten = trailerBuffer.getUnsignedIntLE(4); + + if (v != crc.getValue() || bytesWritten != (inflater.getBytesWritten() & 0xffffffffL)) { + handleException(new ZipException("Corrupt GZIP trailer")); + return; + } + + if (endHandler != null) { + endHandler.handle(null); + } + } + + @Override + public GzipReadStream endHandler(Handler endHandler) { + this.endHandler = endHandler; + return this; + } +} diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/InboundBuffer.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/InboundBuffer.java new file mode 100644 index 0000000..5e476ce --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/InboundBuffer.java @@ -0,0 +1,330 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.core.Handler; + +import java.util.ArrayDeque; + +public class InboundBuffer { + + /** + * A reusable sentinel for signaling the end of a stream. 
+ */ + public static final Object END_SENTINEL = new Object(); + + private final ArrayDeque pending; + private final long highWaterMark; + private long demand; + private Handler handler; + private boolean overflow; + private Handler drainHandler; + private Handler emptyHandler; + private Handler exceptionHandler; + private boolean emitting; + + public InboundBuffer() { + this(16L); + } + + public InboundBuffer(long highWaterMark) { + if (highWaterMark < 0) { + throw new IllegalArgumentException("highWaterMark " + highWaterMark + " >= 0"); + } + this.highWaterMark = highWaterMark; + this.demand = Long.MAX_VALUE; + this.pending = new ArrayDeque<>(); + } + + /** + * Write an {@code element} to the buffer. The element will be delivered synchronously to the handler when + * it is possible, otherwise it will be queued for later delivery. + * + * @param element the element to add + * @return {@code false} when the producer should stop writing + */ + public boolean write(E element) { + Handler handler; + synchronized (this) { + if (demand == 0L || emitting) { + pending.add(element); + return checkWritable(); + } else { + if (demand != Long.MAX_VALUE) { + --demand; + } + emitting = true; + handler = this.handler; + } + } + handleEvent(handler, element); + return emitPending(); + } + + private boolean checkWritable() { + if (demand == Long.MAX_VALUE) { + return true; + } else { + long actual = pending.size() - demand; + boolean writable = actual < highWaterMark; + overflow |= !writable; + return writable; + } + } + + /** + * Write an {@code iterable} of {@code elements}. + * + * @see #write(E) + * @param elements the elements to add + * @return {@code false} when the producer should stop writing + */ + public boolean write(Iterable elements) { + synchronized (this) { + for (E element : elements) { + pending.add(element); + } + if (demand == 0L || emitting) { + return checkWritable(); + } else { + emitting = true; + } + } + return emitPending(); + } + + private boolean emitPending() { + E element; + Handler h; + while (true) { + synchronized (this) { + int size = pending.size(); + if (demand == 0L) { + emitting = false; + boolean writable = size < highWaterMark; + overflow |= !writable; + return writable; + } else if (size == 0) { + emitting = false; + return true; + } + if (demand != Long.MAX_VALUE) { + demand--; + } + element = pending.poll(); + h = this.handler; + } + handleEvent(h, element); + } + } + + /** + * Drain the buffer. + *
    + * Calling this assumes {@code (demand > 0L && !pending.isEmpty()) == true} + */ + private void drain() { + int emitted = 0; + Handler drainHandler; + Handler emptyHandler; + while (true) { + E element; + Handler handler; + synchronized (this) { + int size = pending.size(); + if (size == 0) { + emitting = false; + if (overflow) { + overflow = false; + drainHandler = this.drainHandler; + } else { + drainHandler = null; + } + emptyHandler = emitted > 0 ? this.emptyHandler : null; + break; + } else if (demand == 0L) { + emitting = false; + return; + } + emitted++; + if (demand != Long.MAX_VALUE) { + demand--; + } + element = pending.poll(); + handler = this.handler; + } + handleEvent(handler, element); + } + if (drainHandler != null) { + handleEvent(drainHandler, null); + } + if (emptyHandler != null) { + handleEvent(emptyHandler, null); + } + } + + private void handleEvent(Handler handler, T element) { + if (handler != null) { + try { + handler.handle(element); + } catch (Throwable t) { + handleException(t); + } + } + } + + private void handleException(Throwable err) { + Handler handler; + synchronized (this) { + if ((handler = exceptionHandler) == null) { + return; + } + } + handler.handle(err); + } + + /** + * Request a specific {@code amount} of elements to be fetched, the amount is added to the actual demand. + *
+ * Pending elements in the buffer will be delivered asynchronously on the context to the handler. + *
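+ * <p>Illustrative sketch (assumed usage, not part of the original documentation): + * <pre>{@code + * InboundBuffer<String> queue = new InboundBuffer<>(4L); + * queue.handler(System.out::println); + * queue.pause();     // fetch mode: demand drops to 0 + * queue.write("a");  // queued, not delivered yet + * queue.write("b"); + * queue.fetch(1);    // drains exactly one element and prints "a" + * }</pre>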
    + * This method can be called from any thread. + * + * @return {@code true} when the buffer will be drained + */ + public boolean fetch(long amount) { + if (amount < 0L) { + throw new IllegalArgumentException(); + } + synchronized (this) { + demand += amount; + if (demand < 0L) { + demand = Long.MAX_VALUE; + } + if (emitting || (pending.isEmpty() && !overflow)) { + return false; + } + emitting = true; + } + drain(); + return true; + } + + /** + * Read the most recent element synchronously. + *
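+ * <p>For example (illustrative): {@code E next = buffer.read(); // null when the buffer is empty}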
    + * No handler will be called. + * + * @return the most recent element or {@code null} if no element was in the buffer + */ + public E read() { + synchronized (this) { + return pending.poll(); + } + } + + /** + * Clear the buffer synchronously. + *
    + * No handler will be called. + * + * @return a reference to this, so the API can be used fluently + */ + public synchronized InboundBuffer clear() { + pending.clear(); + return this; + } + + /** + * Pause the buffer, it sets the buffer in {@code fetch} mode and clears the actual demand. + * + * @return a reference to this, so the API can be used fluently + */ + public synchronized InboundBuffer pause() { + demand = 0L; + return this; + } + + /** + * Resume the buffer, and sets the buffer in {@code flowing} mode. + *
+ * Pending elements in the buffer will be delivered asynchronously on the context to the handler. + *
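+ * <p>Note, from the implementation below: {@code resume()} simply delegates to {@code fetch(Long.MAX_VALUE)}.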
    + * This method can be called from any thread. + * + * @return {@code true} when the buffer will be drained + */ + public boolean resume() { + return fetch(Long.MAX_VALUE); + } + + /** + * Set an {@code handler} to be called with elements available from this buffer. + * + * @param handler the handler + * @return a reference to this, so the API can be used fluently + */ + public synchronized InboundBuffer handler(Handler handler) { + this.handler = handler; + return this; + } + + /** + * Set an {@code handler} to be called when the buffer is drained and the producer can resume writing to the buffer. + * + * @param handler the handler to be called + * @return a reference to this, so the API can be used fluently + */ + public synchronized InboundBuffer drainHandler(Handler handler) { + drainHandler = handler; + return this; + } + + /** + * Set an {@code handler} to be called when the buffer becomes empty. + * + * @param handler the handler to be called + * @return a reference to this, so the API can be used fluently + */ + public synchronized InboundBuffer emptyHandler(Handler handler) { + emptyHandler = handler; + return this; + } + + /** + * Set an {@code handler} to be called when an exception is thrown by an handler. + * + * @param handler the handler + * @return a reference to this, so the API can be used fluently + */ + public synchronized InboundBuffer exceptionHandler(Handler handler) { + exceptionHandler = handler; + return this; + } + + /** + * @return whether the buffer is empty + */ + public synchronized boolean isEmpty() { + return pending.isEmpty(); + } + + /** + * @return whether the buffer is writable + */ + public synchronized boolean isWritable() { + return pending.size() < highWaterMark; + } + + /** + * @return whether the buffer is paused, i.e it is in {@code fetch} mode and the demand is {@code 0}. 
+ */ + public synchronized boolean isPaused() { + return demand == 0L; + } + + /** + * @return the actual number of elements in the buffer + */ + public synchronized int size() { + return pending.size(); + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/InputStreamReadStream.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/InputStreamReadStream.java new file mode 100644 index 0000000..6f06708 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/InputStreamReadStream.java @@ -0,0 +1,174 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.core.AsyncResult; +import io.vertx.core.Handler; +import io.vertx.core.Vertx; +import io.vertx.core.buffer.Buffer; +import io.vertx.core.logging.Logger; +import io.vertx.core.logging.LoggerFactory; +import io.vertx.core.streams.ReadStream; + +import java.io.IOException; +import java.io.InputStream; + +/** + * Transforms an {@link InputStream} to a {@link ReadStream} + * @author Michel Kraemer + */ +public class InputStreamReadStream implements ReadStream { + private static Logger log = LoggerFactory.getLogger(InputStreamReadStream.class); + + private static final int READ_BUFFER_SIZE = 8192; + + private final InputStream is; + private final Vertx vertx; + private boolean readInProgress; + private boolean paused; + private boolean closed; + private Handler exceptionHandler; + private Handler dataHandler; + private Handler endHandler; + + /** + * Create a new read stream + * @param is the input stream to transform + * @param vertx the Vert.x instance + */ + public InputStreamReadStream(InputStream is, Vertx vertx) { + this.is = is; + this.vertx = vertx; + } + + private void handleException(Throwable t) { + if (exceptionHandler != null) { + exceptionHandler.handle(t); + } else { + log.error("Unhandled exception", t); + } + } + + private synchronized void handleData(Buffer buffer) { + if (dataHandler != null) { + dataHandler.handle(buffer); + } + } + + private synchronized void handleEnd() { + if (endHandler != null) { + endHandler.handle(null); + } + } + + private void doRead() { + if (!readInProgress) { + readInProgress = true; + vertx.executeBlocking(f -> { + byte[] buf = new byte[READ_BUFFER_SIZE]; + int read; + try { + synchronized (InputStreamReadStream.this) { + read = is.read(buf, 0, READ_BUFFER_SIZE); + } + } catch (IOException e) { + f.fail(e); + return; + } + Buffer r; + if (read < 0) { + r = Buffer.buffer(); + } else if (read < READ_BUFFER_SIZE) { + r = Buffer.buffer(read); + r.setBytes(0, buf, 0, read); + } else { + r = Buffer.buffer(buf); + } + f.complete(r); + }, ar -> { + if (ar.failed()) { + handleException(ar.cause()); + } else { + readInProgress = false; + Buffer buffer = ar.result(); + if (buffer.length() == 0) { + handleEnd(); + } else { + handleData(buffer); + if (!paused && dataHandler != null) { + doRead(); + } + } + } + }); + } + } + + @Override + public InputStreamReadStream exceptionHandler(Handler handler) { + this.exceptionHandler = handler; + return this; + } + + @Override + public ReadStream handler(Handler handler) { + dataHandler = handler; + if (dataHandler != null && !paused && !closed) { + doRead(); + } + return this; + } + + @Override + public InputStreamReadStream pause() { + paused = true; + return this; + } + + @Override + public InputStreamReadStream resume() { + if (paused && !closed) { + paused = false; + if (dataHandler != null) { + doRead(); + } + } + return this; + } + + @Override + public 
ReadStream fetch(long amount) { + return resume(); + } + + @Override + public InputStreamReadStream endHandler(Handler handler) { + this.endHandler = handler; + return this; + } + + /** + * Close the read stream + * @param handler will be called when the operation has finished (may be null) + */ + public void close(Handler> handler) { + if (handler == null) { + handler = ar -> { + if (ar.failed()) { + handleException(ar.cause()); + } + }; + } + + closed = true; + vertx.executeBlocking(f -> { + try { + synchronized (InputStreamReadStream.this) { + is.close(); + } + } catch (IOException e) { + f.fail(e); + return; + } + f.complete(); + }, handler); + } +} diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/LimitingStream.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/LimitingStream.java new file mode 100644 index 0000000..4812357 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/LimitingStream.java @@ -0,0 +1,107 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.core.Handler; +import io.vertx.core.impl.Arguments; +import io.vertx.core.streams.ReadStream; + +import java.util.Objects; + +/** + * @author Thomas Segismont + */ +public final class LimitingStream implements ReadStream { + + private final ReadStream source; + private final long limit; + + private long received; + private boolean stopped; + private Handler exceptionHandler; + private Handler endHandler; + + public LimitingStream(ReadStream source, long limit) { + Objects.requireNonNull(source, "Source cannot be null"); + Arguments.require(limit >= 0, "Limit must be positive"); + this.source = source; + this.limit = limit; + } + + @Override + public synchronized ReadStream exceptionHandler(Handler handler) { + exceptionHandler = handler; + return this; + } + + private synchronized Handler getExceptionHandler() { + return exceptionHandler; + } + + @Override + public ReadStream handler(Handler handler) { + if (handler == null) { + source.handler(null); + return this; + } + source + .exceptionHandler(throwable -> notifyTerminalHandler(getExceptionHandler(), throwable)) + .endHandler(v -> notifyTerminalHandler(getEndHandler(), null)) + .handler(item -> { + boolean emit, terminate; + synchronized (this) { + received++; + emit = !stopped && received <= limit; + terminate = !stopped && ((received == 1 && limit == 0) || received == limit); + } + if (emit) { + handler.handle(item); + } + if (terminate) { + notifyTerminalHandler(getEndHandler(), null); + } + }); + return this; + } + + @Override + public ReadStream pause() { + source.pause(); + return this; + } + + @Override + public ReadStream resume() { + return fetch(Long.MAX_VALUE); + } + + @Override + public ReadStream fetch(long l) { + source.fetch(l); + return this; + } + + @Override + public synchronized ReadStream endHandler(Handler handler) { + endHandler = handler; + return this; + } + + private synchronized Handler getEndHandler() { + return endHandler; + } + + private void notifyTerminalHandler(Handler handler, V value) { + Handler h; + synchronized (this) { + if (!stopped) { + stopped = true; + source.handler(null).exceptionHandler(null).endHandler(null); + h = handler; + } else { + h = null; + } + } + if (h != null) { + h.handle(value); + } + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/MappingStream.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/MappingStream.java new file mode 100644 index 0000000..070723f --- 
/dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/MappingStream.java @@ -0,0 +1,62 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.core.Handler; +import io.vertx.core.streams.ReadStream; + +import java.util.Objects; +import java.util.function.Function; + +/** + * @author Thomas Segismont + */ +public final class MappingStream implements ReadStream { + + private final ReadStream source; + private final Function mapping; + + public MappingStream(ReadStream source, Function mapping) { + Objects.requireNonNull(source, "Source cannot be null"); + Objects.requireNonNull(mapping, "Mapping function cannot be null"); + this.source = source; + this.mapping = mapping; + } + + @Override + public ReadStream exceptionHandler(Handler handler) { + source.exceptionHandler(handler); + return this; + } + + @Override + public ReadStream handler(Handler handler) { + if (handler == null) { + source.handler(null); + } else { + source.handler(event -> handler.handle(mapping.apply(event))); + } + return this; + } + + @Override + public ReadStream pause() { + source.pause(); + return this; + } + + @Override + public ReadStream resume() { + return fetch(Long.MAX_VALUE); + } + + @Override + public ReadStream fetch(long amount) { + source.fetch(amount); + return this; + } + + @Override + public ReadStream endHandler(Handler endHandler) { + source.endHandler(endHandler); + return this; + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/Pump.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/Pump.scala new file mode 100644 index 0000000..1d5fd11 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/Pump.scala @@ -0,0 +1,13 @@ +package com.harana.modules.vertx.models.streams + +import io.vertx.core.streams.impl.PumpImpl +import io.vertx.core.streams.{ReadStream, WriteStream} + +case class Pump[T](rs: ReadStream[T], ws: WriteStream[T]) extends PumpImpl(rs, ws) { + + override def start() = { + rs.resume() + super.start() + } + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/SkippingStream.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/SkippingStream.java new file mode 100644 index 0000000..fc699af --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/SkippingStream.java @@ -0,0 +1,89 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.core.Handler; +import io.vertx.core.impl.Arguments; +import io.vertx.core.streams.ReadStream; + +import java.util.Objects; + +/** + * @author Thomas Segismont + */ +public final class SkippingStream implements ReadStream { + + private final ReadStream source; + private final long skip; + + private long skipped; + + public SkippingStream(ReadStream source, long skip) { + Objects.requireNonNull(source, "Source cannot be null"); + Arguments.require(skip >= 0, "Skip amount must be positive"); + this.source = source; + this.skip = skip; + } + + @Override + public ReadStream exceptionHandler(Handler handler) { + source.exceptionHandler(handler); + return this; + } + + @Override + public ReadStream handler(Handler handler) { + if (handler == null) { + source.handler(null); + return this; + } + source.handler(item -> { + boolean emit; + synchronized (this) { + if (skipped < skip) { + emit = false; + skipped++; + } else { + emit = true; + } + } + if (emit) { + handler.handle(item); + } + }); + return this; + } + + @Override + public 
ReadStream pause() { + source.pause(); + return this; + } + + @Override + public ReadStream resume() { + return fetch(Long.MAX_VALUE); + } + + @Override + public ReadStream fetch(long l) { + long value; + synchronized (this) { + if (skipped < skip) { + if (l < Long.MAX_VALUE - skip + skipped) { + value = l + skip - skipped; + } else { + value = Long.MAX_VALUE; + } + } else { + value = l; + } + } + source.fetch(value); + return this; + } + + @Override + public ReadStream endHandler(Handler handler) { + source.endHandler(handler); + return this; + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/VertxBufferOutputStream.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/VertxBufferOutputStream.scala new file mode 100644 index 0000000..1f1cad2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/VertxBufferOutputStream.scala @@ -0,0 +1,20 @@ +package com.harana.modules.vertx.models.streams + +import io.vertx.core.buffer.Buffer + +import java.io.OutputStream + +class VertxBufferOutputStream extends OutputStream { + + val buffer = Buffer.buffer() + + override def write(b: Int) = + buffer.appendByte((b & 0xFF).toByte) + + override def write(b: Array[Byte]) = + buffer.appendBytes(b) + + override def write(b: Array[Byte], off: Int, len: Int) = + buffer.appendBytes(b, off, len) + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/package.scala b/jvm/src/main/scala/com/harana/modules/vertx/package.scala new file mode 100644 index 0000000..8ff9a5f --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/package.scala @@ -0,0 +1,266 @@ +package com.harana.modules + +import com.google.common.base.Strings +import com.harana.modules.core.app.App.runEffect +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.vertx.models._ +import com.harana.modules.vertx.models.streams.{BufferReadStream, GzipReadStream, InputStreamReadStream, Pump} +import io.vertx.core.buffer.Buffer +import io.vertx.core.http.HttpHeaders.CONTENT_TYPE +import io.vertx.core.http._ +import io.vertx.core.{AsyncResult, Handler, MultiMap, Promise, Vertx => VX} +import io.vertx.ext.reactivestreams.ReactiveWriteStream +import io.vertx.ext.web.templ.handlebars.HandlebarsTemplateEngine +import io.vertx.ext.web.{Router, RoutingContext} +import org.apache.logging.log4j.LogManager +import org.pac4j.core.config.{Config => Pac4jConfig} +import org.pac4j.core.context.session.SessionStore +import org.pac4j.vertx.auth.Pac4jAuthProvider +import org.pac4j.vertx.handler.impl.{SecurityHandler, SecurityHandlerOptions} +import zio.internal.Platform +import zio.{Runtime, Unsafe, ZEnvironment, ZIO} + +import java.io.{File, FileInputStream} +import java.nio.file.Path +import scala.jdk.CollectionConverters._ + +package object vertx { + + type Address = String + type WebSocketHeaders = MultiMap + + val logger = LogManager.getLogger("Vertx") + val corsRules = CrossOriginResourceSharing() + + def generateResponse(vx: VX, + logger: Logger, + micrometer: Micrometer, + templateEngine: HandlebarsTemplateEngine, + uploadsDirectory: Path, + rc: RoutingContext, + handler: RouteHandler, + secured: Boolean = false): Unit = + runEffect( + for { + sample <- micrometer.startTimer + _ <- logger.info("--------------------------------------------------------------------------") + _ <- logger.info(s"${rc.request().method().name()}: ${rc.request().uri()}") + _ <- 
logger.info(rc.request().headers().asScala.map { e => s"${e.getKey} - ${e.getValue}"}.mkString("\n\t\t")) + handler <- handler match { + case RouteHandler.Standard(handler) => + handler(rc) + + case RouteHandler.FileUpload(handler) => + for { + _ <- ZIO.succeed(rc.request().setExpectMultipart(true)) + handler <- handler(rc, uploadsDirectory, rc.fileUploads().asScala.toList) + } yield handler + + // Need to handle logic from here: https://github.com/vert-x3/vertx-web/blob/master/vertx-web/src/main/java/io/vertx/ext/web/handler/impl/BodyHandlerImpl.java + case RouteHandler.Stream(handler) => + for { + stream <- ZIO.succeed(ReactiveWriteStream.writeStream[Buffer](vx)) + pump = Pump(rc.request(), stream) + handler <- handler(rc, stream, pump) + } yield handler + } + + _ = handler match { + case Response.Buffer(buffer, gzipped, _, _, _, _, _) => + val brs = new BufferReadStream(buffer) + val rs = if (gzipped) new GzipReadStream(brs) else brs + val pump = Pump(rs, rc.response()) + rs.endHandler(_ => rc.response().close()) + pump.start() + + case Response.Content(content, contentType, cookies, statusCode, cors, headers) => + response(rc, contentType, cookies, statusCode, cors, headers).end(content) + + case Response.Empty(contentType, cookies, statusCode, cors, headers) => + response(rc, contentType, cookies, statusCode, cors, headers).end() + + case Response.File(filename, inputStream, gzipped, contentSize, contentType, cookies, statusCode, cors, headers) => + val r = response(rc, contentType, cookies, statusCode, cors, headers) + r.putHeader("Content-Disposition", s"attachment; filename=$filename;") + r.setChunked(true) + if (contentSize.nonEmpty) r.putHeader(HttpHeaders.CONTENT_LENGTH, contentSize.get.toString) + val isrs = new InputStreamReadStream(inputStream, vx) + val rs = if (gzipped) new GzipReadStream(isrs) else isrs + val pump = Pump(rs, r) + rs.endHandler(_ => r.end().onComplete((_: AsyncResult[Void]) => r.close())) + pump.start() + + case Response.InputStream(inputStream, gzipped, contentSize, contentType, cookies, statusCode, cors, headers) => + val r = response(rc, contentType, cookies, statusCode, cors, headers) + r.setChunked(true) + val isrs = new InputStreamReadStream(inputStream, vx) + if (contentSize.nonEmpty) r.putHeader(HttpHeaders.CONTENT_LENGTH, contentSize.get.toString) + val rs = if (gzipped) new GzipReadStream(isrs) else isrs + rs.endHandler(_ => r.end().onComplete((_: AsyncResult[Void]) => r.close())) + Pump(rs, r).start() + + case Response.JSON(json, contentType, cookies, statusCode, cors, headers) => + response(rc, contentType, cookies, statusCode, cors, headers).end(json.toString) + + case Response.ReadStream(stream, contentSize, contentType, cookies, statusCode, cors, headers) => + val r = response(rc, contentType, cookies, statusCode, cors, headers) + r.setChunked(true) + if (contentSize.nonEmpty) r.putHeader(HttpHeaders.CONTENT_LENGTH, contentSize.get.toString) + stream.endHandler(_ => r.end().onComplete((_: AsyncResult[Void]) => r.close())) + Pump(stream, r).start() + + case Response.Redirect(url, contentType, cookies, _, cors, headers) => + response(rc, contentType, cookies, Some(302), cors, headers).putHeader("location", url).end() + + case Response.Template(path, parameters, contentType, cookies, statusCode, cors, headers) => + templateEngine.render(parameters.asJava, path, new Handler[AsyncResult[Buffer]] { + override def handle(result: AsyncResult[Buffer]): Unit = + if (result.succeeded()) response(rc, contentType, cookies, statusCode, cors, 
headers).end(result.result()) + else { + result.cause().printStackTrace() + rc.fail(result.cause()) + } + }) + } + _ <- micrometer.stopTimer(sample, s"route_${rc.normalizedPath().substring(1).replaceAll("/", "_")}") + } yield () + ) + + + def anonymousAuth(vx: VX, + sessionStore: SessionStore, + config: Pac4jConfig, + authProvider: Pac4jAuthProvider, + url: String, + router: Router) = { + + val options = new SecurityHandlerOptions().setClients("AnonymousClient") + router.get(url).handler(new SecurityHandler(vx, sessionStore, config, authProvider, options)) + } + + + def sendFile(file: File, vx: VX, rc: RoutingContext) = { + val r = rc.response() + r.putHeader("Content-Disposition", s"attachment; filename=${file.getName};") + r.setChunked(true) + r.putHeader(HttpHeaders.CONTENT_LENGTH, file.length().toString) + val rs = new InputStreamReadStream(new FileInputStream(file), vx) + val pump = Pump(rs, r) + rs.endHandler(_ => { + r.end() + r.close() + }) + pump.start() + } + + def auth(vx: VX, + sessionStore: SessionStore, + config: Pac4jConfig, + authProvider: Pac4jAuthProvider, + router: Router, + url: String, + handler: Handler[RoutingContext], + clientNames: String, + authorizerName: Option[String]): Unit = { + + var options = new SecurityHandlerOptions().setClients(clientNames) + if (authorizerName.nonEmpty) options = options.setAuthorizers(authorizerName.get) + router.get(url).handler(new SecurityHandler(vx, sessionStore, config, authProvider, options)) + router.get(url).handler(setContentType(ContentType.HTML.value)) + router.get(url).handler(handler) + } + + + def setContentType(contentType: String): Handler[RoutingContext] = + (rc: RoutingContext) => { + rc.response.putHeader(CONTENT_TYPE, contentType) + rc.next() + } + +// FIXME +// def toHandler[R, A](runtime: Runtime[_], zio: ZIO[R, _, A], environment: ZEnvironment[R]) = +// (p: Promise[A]) => p.complete( +// Unsafe.unsafe { implicit unsafe => +// Runtime.default.unsafe.run(zio.provideEnvironment(environment)).getOrThrow() +// } +// ) + + + def getVersion = { + var version = System.getProperty("java.version") + if (version.startsWith("1.")) version = version.substring(2, 3) + else { + val dot = version.indexOf(".") + if (dot != -1) version = version.substring(0, dot) + } + version.toInt + } + + def defaultAllowedHeaders = + Set( + "x-requested-with", + "Access-Control-Allow-Origin", + "Access-Control-Allow-Methods", + "Access-Control-Allow-Headers", + "Access-Control-Allow-Credentials", + "origin", + "Content-Type", + "accept", + "Authorization") + + def defaultAllowedMethods = + Set( + HttpMethod.GET, + HttpMethod.POST, + HttpMethod.OPTIONS, + HttpMethod.DELETE, + HttpMethod.PATCH, + HttpMethod.PUT) + + def syncSockets(source: ServerWebSocket, target: WebSocket) = { + syncSocket(source, target) + syncSocket(target, source) + } + + private def syncSocket(first: WebSocketBase, second: WebSocketBase) = { + if (!first.isClosed) { + first.frameHandler(frame => { + if (frame.isClose) second.close() + if (frame.isContinuation) second.writeFrame(WebSocketFrame.continuationFrame(frame.binaryData(), frame.isFinal)) + + (frame.isFinal, frame.isBinary, frame.isText) match { + case (true, true, _) => second.writeFinalBinaryFrame(frame.binaryData()) + case (true, _, true) => second.writeFinalTextFrame(frame.textData()) + case (false, true, _) => second.writeBinaryMessage(frame.binaryData()) + case (false, _, true) => second.writeTextMessage(frame.textData()) + case (_, _, _) => + } + }) + } + } + + private def response(rc: RoutingContext, + 
contentType: Option[ContentType], + cookies: List[Cookie], + statusCode: Option[Int], + cors: Boolean, + headers: Map[_ <: CharSequence, List[_ <: CharSequence]]) = { + val response = rc.response() + if (contentType.nonEmpty) response.putHeader(CONTENT_TYPE, contentType.get.value) + cookies.foreach(response.addCookie) + if (statusCode.nonEmpty) response.setStatusCode(statusCode.get) + if (cors) { + val corsOrigin = rc.request().getHeader(HttpHeaders.ORIGIN) + if (!Strings.isNullOrEmpty(corsOrigin) && corsRules.isOriginAllowed(corsOrigin)) { + response.putHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN.toString, corsRules.getAllowedOrigin(corsOrigin)) + response.putHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS.toString, corsRules.allowedMethods.asJava) + } + } + headers.foreach { case (k, v) => + if (v.size == 1) response.putHeader(k.toString, v.head.toString) + else response.putHeader(k.toString, v.map(_.toString).asJava) + } + response + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/proxy/URIInfo.scala b/jvm/src/main/scala/com/harana/modules/vertx/proxy/URIInfo.scala new file mode 100644 index 0000000..840a8ec --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/proxy/URIInfo.scala @@ -0,0 +1,21 @@ +package com.harana.modules.vertx.proxy + +import java.net.URI + +case class URIInfo(uri: URI, domain: String) { + + def contextPath = domain + def proxyPath = pathInfo + (if (uri.getQuery != null) "?" else "") + def requestUrl = uri.toString + def requestUri = uri.getPath + (if (uri.getQuery != null) "?" else "") + uri.getQuery + def queryString = uri.getQuery + def scheme = uri.getScheme + def serverName = uri.getHost + def serverPort = uri.getPort + + def pathInfo = { + var result = uri.getPath.replaceFirst(domain, "") + if (!result.startsWith("/")) result = "/" + result + result + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/proxy/WSURI.scala b/jvm/src/main/scala/com/harana/modules/vertx/proxy/WSURI.scala new file mode 100644 index 0000000..2b05ed2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/proxy/WSURI.scala @@ -0,0 +1,3 @@ +package com.harana.modules.vertx.proxy + +case class WSURI(host: String, port: Int, prefixUri: String) diff --git a/jvm/src/main/scala/com/harana/modules/vertx/proxy/WebProxyClient.scala b/jvm/src/main/scala/com/harana/modules/vertx/proxy/WebProxyClient.scala new file mode 100644 index 0000000..ccf261a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/proxy/WebProxyClient.scala @@ -0,0 +1,242 @@ +package com.harana.modules.vertx.proxy + +import com.harana.designer.backend.modules.proxy._ +import io.netty.handler.codec.http.HttpResponseStatus +import io.vertx.core.buffer.Buffer +import io.vertx.core.http.{Cookie, HttpHeaders} +import io.vertx.core.{AsyncResult, Handler} +import io.vertx.ext.web.RoutingContext +import io.vertx.ext.web.client.{HttpRequest, HttpResponse, WebClient} +import org.apache.commons.lang3.mutable.MutableBoolean + +import java.net.{HttpCookie, URI} +import java.util.Map.Entry +import java.util.function.Function +import scala.jdk.CollectionConverters._ + + +class WebProxyClient(client: WebClient, clientOptions: WebProxyClientOptions) { + + var serverRequestUriInfo: URIInfo = _ + var contentFilterEnabled = new MutableBoolean(false) + var uploadsDirectory: String = "file-uploads" + + var cookieFilterRequest: Function[String, Boolean] = _ + var cookieFilterResponse: Function[HttpCookie, Boolean] = _ + var 
headerFilterResponse: Function[Entry[String, String], Boolean] = _ + var contentFilter: Function[Buffer, Buffer] = _ + + + def execute(rc: RoutingContext, urlPattern: String, targetUri: URI): Unit = { + var domain = urlPattern.replace("/*", "") + if (domain.isEmpty) domain = "/" + serverRequestUriInfo = URIInfo(new URI(rc.request.absoluteURI), domain) + + val handler = requestHandler(rc, targetUri) + val method = rc.request.method + val proxyRequestUri = rewriteUrlFromRequest(targetUri, serverRequestUriInfo.queryString, serverRequestUriInfo.pathInfo) + + val proxyRequest = client.requestAbs(method, proxyRequestUri).ssl(targetUri.getScheme.equalsIgnoreCase("https")) + + val multipart = multipartForm(rc) + if (multipart) copyRequestHeadersForMultipartForm(rc, proxyRequest, targetUri) + else copyRequestHeaders(rc, proxyRequest, targetUri) + if (clientOptions.forwardIP) setXForwardedForHeader(rc, proxyRequest, serverRequestUriInfo.scheme) + + if (multipart) proxyRequest.sendMultipartForm(createMultipartForm(rc, uploadsDirectory), handler) + else { + val buffer = rc.getBody + if (buffer != null) { + val copy = buffer.copy().asInstanceOf[Buffer] + proxyRequest.headers.set(HttpHeaders.CONTENT_LENGTH, buffer.length.toString) + proxyRequest.sendBuffer(copy, handler) + } + else proxyRequest.send(handler) + } + } + + + private def requestHandler(rc: RoutingContext, targetUri: URI): Handler[AsyncResult[HttpResponse[Buffer]]] = asyncResult => { + try if (asyncResult.succeeded) { + val statusCode = asyncResult.result.statusCode() + rc.response.setStatusCode(statusCode) + rc.response.setStatusMessage(asyncResult.result.statusMessage()) + copyResponseHeaders(asyncResult.result, rc, targetUri, headerFilterResponse) + + if (statusCode == HttpResponseStatus.NOT_MODIFIED.code) { + rc.response.headers.set(HttpHeaders.CONTENT_LENGTH, "0") + } + else { + val proxyResponse = asyncResult.result + if (proxyResponse.body() != null) + if (!rc.response.closed && !rc.response.ended && rc.response.bytesWritten == 0) { + val buffer = proxyResponse.body().copy.asInstanceOf[Buffer] + //if (contentFilterEnabled.isTrue) buffer = contentFilter.apply(buffer) + rc.response.headers.set(HttpHeaders.CONTENT_LENGTH, String.valueOf(buffer.length)) + rc.response.write(buffer) + } + } + + if (!rc.response.closed && !rc.response.ended) rc.response.end() + } + catch { + case e: Exception => + e.printStackTrace() + rc.fail(e) + } + } + + + def rewriteUrlFromRequest(targetUri: URI, queryString: String, pathInfo: String) = { + val uri = new StringBuilder(500) + uri.append(targetUri) + + if (pathInfo != null) uri.append(encodeUriQuery(pathInfo, encodePercent = true)) + var fragment: String = null + var query = queryString + + if (query != null) { + val fragIdx = query.indexOf('#') + if (fragIdx >= 0) { + fragment = queryString.substring(fragIdx + 1) + query = query.substring(0, fragIdx) + } + } + + if (query != null && query.nonEmpty) { + uri.append('?') + uri.append(encodeUriQuery(query, encodePercent = false)) + } + if (clientOptions.sendUrlFragment && fragment != null) { + uri.append('#') + uri.append(encodeUriQuery(fragment, encodePercent = false)) + } + + uri.toString + } + + + def copyRequestHeaders(rc: RoutingContext, proxyRequest: HttpRequest[Buffer], targetObj: URI): Unit = { + val headers = rc.request.headers.iterator + while (headers.hasNext) { + val header = headers.next + val headerName = header.getKey + var headerValue = header.getValue + + if (!headerName.equalsIgnoreCase("Content-Length") && 
!hopByHopHeaders.contains(headerName)) { + if (!clientOptions.preserveHost && headerName.equalsIgnoreCase("Host")) { + headerValue = targetObj.getHost + if (targetObj.getPort != -1) headerValue += ":" + targetObj.getPort + } + else if (header.getKey.equalsIgnoreCase("Cookie")) headerValue = getRealCookie(headerValue, cookieFilterRequest) + proxyRequest.headers.set(headerName, headerValue) + } + } + } + + + def copyRequestHeadersForMultipartForm(rc: RoutingContext, proxyRequest: HttpRequest[Buffer], targetObj: URI): Unit = { + val headers = rc.request.headers.iterator + + while (headers.hasNext) { + val header = headers.next + val headerName = header.getKey + var headerValue = header.getValue + + if (!clientOptions.preserveHost && headerName.equalsIgnoreCase("Host")) { + headerValue = targetObj.getHost + if (targetObj.getPort != -1) headerValue += ":" + targetObj.getPort + proxyRequest.headers.set(headerName, headerValue) + } + else if (header.getKey.equalsIgnoreCase("Cookie")) { + headerValue = getRealCookie(headerValue, cookieFilterRequest) + proxyRequest.headers.set(headerName, headerValue) + } + else if (header.getKey.equalsIgnoreCase("Authorization")) { + proxyRequest.headers.set(headerName, headerValue) + } + } + } + + + def copyResponseHeaders(proxyResponse: HttpResponse[Buffer], rc: RoutingContext, targetUri: URI, filter: Function[Entry[String, String], Boolean]): Unit = { + + val headers = proxyResponse.headers() + .set("Content-Security-Policy", s"frame-ancestors ${clientOptions.iFrameAncestors.map(a => s"*.$a").mkString(" ")} 'self';") + val iterator = headers.iterator + + while (iterator.hasNext) { + val header = iterator.next + //if (filter != null) if (!filter.apply(header)) copyResponseHeader(proxyResponse, rc, targetUri, header) + //else + copyResponseHeader(proxyResponse, rc, targetUri, header) + } + } + + + def copyResponseHeader(proxyResponse: HttpResponse[Buffer], rc: RoutingContext, targetUri: URI, header: Entry[String, String]): Unit = { + val headerName = header.getKey + if (hopByHopHeaders.contains(headerName)) return + + val headerValue = header.getValue + if (headerName.equalsIgnoreCase("Set-Cookie") || headerName.equalsIgnoreCase("Set-Cookie2")) { + copyProxyCookie(rc, headerValue) + }else if (headerName.equalsIgnoreCase("Location")) + rc.response.headers.add(headerName, rewriteLocation(headerValue, serverRequestUriInfo.requestUrl)) + else rc.response.headers.add(headerName, headerValue) + } + + + def copyProxyCookie(rc: RoutingContext, headerValue: String): Unit = { + var path = "" + if (!clientOptions.preserveCookiesContextPath) path = serverRequestUriInfo.contextPath + if (!clientOptions.preserveCookiesProxyPath) path += serverRequestUriInfo.proxyPath + if (path.isEmpty) path = "/" + + for (cookie <- HttpCookie.parse(headerValue).asScala) { + //if (!cookieFilterResponse.apply(cookie)) { + val serverCookie = Cookie.cookie(cookie.getName, cookie.getValue) + val maxAge = cookie.getMaxAge().toInt + serverCookie.setMaxAge(Int.MaxValue) + serverCookie.setPath(path) + serverCookie.setSecure(cookie.getSecure) + rc.response.addCookie(serverCookie) + //} + } + } + + + def getRealCookie(cookieValue: String, filter: Function[String, Boolean]) = { + val escapedCookie = new StringBuilder + val cookies = cookieValue.split("[;,]") + for (cookie <- cookies) { + val cookieSplit = cookie.split("=") + if (cookieSplit.length == 2) { + var cookieName = cookieSplit(0).trim + //if (!filter.apply(cookieName)) { + if (escapedCookie.nonEmpty) escapedCookie.append("; ") + 
escapedCookie.append(cookieName).append("=").append(cookieSplit(1).trim) + //} + } + } + escapedCookie.toString + } + + + def rewriteUrlFromResponse(targetUri: URI, theUrl: String) = { + if (theUrl.startsWith(targetUri.toString)) { + val curUrl = new StringBuffer(serverRequestUriInfo.requestUrl) + var pos = curUrl.indexOf("://") + if (pos >= 0) { + pos = curUrl.indexOf("/", pos + 3) + if (pos >= 0) curUrl.setLength(pos) + } + curUrl.append(serverRequestUriInfo.contextPath) + curUrl.append(serverRequestUriInfo.proxyPath) + curUrl.append(theUrl, targetUri.toString.length, theUrl.length) + curUrl.toString + } else { + theUrl + } + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/proxy/WebProxyClientOptions.scala b/jvm/src/main/scala/com/harana/modules/vertx/proxy/WebProxyClientOptions.scala new file mode 100644 index 0000000..12d32c1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/proxy/WebProxyClientOptions.scala @@ -0,0 +1,11 @@ +package com.harana.modules.vertx.proxy + +case class WebProxyClientOptions(log: Boolean = false, + sendUrlFragment: Boolean = true, + preserveHost: Boolean = false, + preserveCookies: Boolean = true, + forwardIP: Boolean = true, + iFrameAncestors: List[String] = List(), + preserveCookiesContextPath: Boolean = true, + preserveCookiesProxyPath: Boolean = true, + ssl: Boolean = false) diff --git a/jvm/src/main/scala/com/harana/modules/vertx/proxy/package.scala b/jvm/src/main/scala/com/harana/modules/vertx/proxy/package.scala new file mode 100644 index 0000000..b4aea4b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/proxy/package.scala @@ -0,0 +1,147 @@ +package com.harana.designer.backend.modules + +import io.netty.handler.codec.http.HttpHeaderValues +import io.vertx.core.buffer.Buffer +import io.vertx.core.http.HttpHeaders +import io.vertx.ext.web.RoutingContext +import io.vertx.ext.web.client.HttpRequest +import io.vertx.ext.web.multipart.MultipartForm + +import java.net.URLDecoder +import java.util.Formatter +import scala.collection.BitSet + +package object proxy { + + val hopByHopHeaders: Map[String, Boolean] = + Map( + "Connection" -> true, + "Keep-Alive" -> true, + "Proxy-Authenticate" -> true, + "Proxy-Authorization" -> true, + "TE" -> true, + "Trailers" -> true, + "Transfer-Encoding" -> true, + "Upgrade" -> true + ) + + + val asciiQueryCharacters: BitSet = { + var bitset = scala.collection.mutable.BitSet(128) + List( + "abcdefghijklmnopqrstuvwxyz", + "ABCDEFGHIJKLMNOPQRSTUVWXYZ", + "0123456789", + "_-!.~'()*", + ",;:$&+=", + "?/[]@","%").foreach(l => l.foreach(bitset += _.toInt)) + bitset + } + + def rewriteLocation(from: String, to: String) = { + val fromParts = urlParts(from) + val toParts = urlParts(to, true) + + (fromParts, toParts) match { + case ((_, Some(fromPath)), (Some(toHost), Some(toPath))) => s"$toHost/$toPath/$fromPath" + case ((_, Some(fromPath)), (Some(toHost), None)) => s"$toHost/$fromPath" + case ((_, None), (Some(toHost), Some(toPath))) => s"$toHost/$toPath" + case ((_, None), (Some(toHost), None)) => toHost + case ((Some(fromHost), None), (None, None)) => fromHost + case ((None, Some(fromPath)), (None, Some(toPath))) => s"$toPath/$fromPath" + case ((None, Some(fromPath)), (None, None)) => s"/$fromPath" + case _ => "" + } + } + + def urlParts(uri: String, ignorePath: Boolean = false): (Option[String], Option[String]) = { + val cleanUri = clean(uri) + val delimiter = cleanUri.indexOf("/", 8) + if (cleanUri.startsWith("http://")) + if (delimiter > 0) 
(Some(clean(cleanUri.substring(0, delimiter))), if (ignorePath) None else Some(clean(cleanUri.substring(delimiter, cleanUri.length())))) + else (Some(cleanUri), None) + else + (None, if (cleanUri.isEmpty || ignorePath) None else Some(cleanUri)) + } + + private def clean(uri: String) = { + var cleanUri = uri.trim() + if (cleanUri.startsWith("/")) cleanUri = cleanUri.drop(1) + if (cleanUri.endsWith("/") && !cleanUri.contains("?")) cleanUri = cleanUri.dropRight(1) + URLDecoder.decode(cleanUri, "UTF-8" ); + } + + def encodeUriQuery(in: CharSequence, encodePercent: Boolean): CharSequence = { + var outBuf: Option[StringBuffer] = None + var formatter: Option[Formatter] = None + + for (i <- 0 until in.length) { + val c = in.charAt(i) + var escape = true + + if (c < 128) { + if (asciiQueryCharacters(c.toInt) && !(encodePercent && c == '%')) escape = false + } else if (!Character.isISOControl(c) && !Character.isSpaceChar(c)) escape = false + + if (!escape) { + if (outBuf.nonEmpty) outBuf.get.append(c) + }else { + if (outBuf.isEmpty) { + outBuf = Some(new StringBuffer(in.length() + 5 * 3)) + outBuf.get.append(in, 0, i) + formatter = Some(new Formatter(outBuf.get)) + }else{ + formatter.get.format("%%%02X", Integer.valueOf(c.toInt)) + } + } + } + if (outBuf.nonEmpty) outBuf.get else in + } + + + def setXForwardedForHeader(routingContext: RoutingContext, proxyRequest: HttpRequest[Buffer], scheme: String): Unit = { + val forHeaderName = "X-Forwarded-For" + var forHeader = routingContext.request.remoteAddress.host + val existingForHeader = routingContext.request.headers.get(forHeaderName) + if (existingForHeader != null) forHeader = existingForHeader + ", " + forHeader + proxyRequest.headers.set(forHeaderName, forHeader) + proxyRequest.headers.set("X-Forwarded-Proto", scheme) + } + + + def getContentLength(routingContext: RoutingContext): Long = { + val contentLengthHeader = routingContext.request.headers.get(HttpHeaders.CONTENT_LENGTH) + if (contentLengthHeader != null) contentLengthHeader.toLong else -1L + } + + + def multipartForm(routingContext: RoutingContext) = { + val value = routingContext.request.getHeader(HttpHeaders.CONTENT_TYPE) + if (value != null) { + value.contains(HttpHeaderValues.MULTIPART_FORM_DATA.toString) + }else{ + false + } + } + + + def createMultipartForm(routingContext: RoutingContext, uploadsDirectory: String) = { + val result = MultipartForm.create + val formAttributes = routingContext.request.formAttributes + val formAttributesIterator = formAttributes.iterator + + while (formAttributesIterator.hasNext) { + val entry = formAttributesIterator.next + result.attribute(entry.getKey, entry.getValue) + } + + val uploadsIterator = routingContext.fileUploads.iterator + + while (uploadsIterator.hasNext) { + val uploadFile = uploadsIterator.next + val fileName = uploadFile.uploadedFileName.replace(uploadsDirectory + "\\", "").replace(uploadsDirectory + "/", "") + result.binaryFileUpload(uploadFile.name, uploadFile.fileName, uploadsDirectory + "/" + fileName, uploadFile.contentType) + } + result + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vfs/LiveVfs.scala b/jvm/src/main/scala/com/harana/modules/vfs/LiveVfs.scala new file mode 100644 index 0000000..1ad1aa9 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vfs/LiveVfs.scala @@ -0,0 +1,165 @@ +package com.harana.modules.vfs + +import com.github.vfss3.S3FileProvider +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import 
com.harana.modules.core.micrometer.Micrometer +import com.harana.sdk.shared.models.HaranaFile +import net.lingala.zip4j.ZipFile +import org.apache.commons.io.IOUtils +import org.apache.commons.vfs2.impl.DefaultFileSystemManager +import org.apache.commons.vfs2.provider.bzip2.Bzip2FileProvider +import org.apache.commons.vfs2.provider.ftp.FtpFileProvider +import org.apache.commons.vfs2.provider.ftps.FtpsFileProvider +import org.apache.commons.vfs2.provider.gzip.GzipFileProvider +import org.apache.commons.vfs2.provider.hdfs.HdfsFileProvider +import org.apache.commons.vfs2.provider.http.HttpFileProvider +import org.apache.commons.vfs2.provider.https.HttpsFileProvider +import org.apache.commons.vfs2.provider.jar.JarFileProvider +import org.apache.commons.vfs2.provider.local.DefaultLocalFileProvider +import org.apache.commons.vfs2.provider.ram.RamFileProvider +import org.apache.commons.vfs2.provider.res.ResourceFileProvider +import org.apache.commons.vfs2.provider.sftp.SftpFileProvider +import org.apache.commons.vfs2.provider.tar.TarFileProvider +import org.apache.commons.vfs2.provider.temp.TemporaryFileProvider +import org.apache.commons.vfs2.provider.url.UrlFileProvider +import org.apache.commons.vfs2.{AllFileSelector, FileUtil, Selectors} + +import java.io.{File, InputStream, OutputStream} +//import org.apache.commons.vfs2.provider.webdav.WebdavFileProvider +import org.apache.commons.vfs2.provider.zip.ZipFileProvider +import zio.{Task, ZLayer, ZIO} + +object LiveVfs { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveVfs(config, logger, micrometer) + } +} + +case class LiveVfs(config: Config, logger: Logger, micrometer: Micrometer) extends Vfs { + + private val sourceManager = { + val fsm = new DefaultFileSystemManager() + fsm.addProvider("ftp", new FtpFileProvider) + fsm.addProvider("ftps", new FtpsFileProvider) + fsm.addProvider("hdfs", new HdfsFileProvider) + fsm.addProvider("http", new HttpFileProvider) + fsm.addProvider("https", new HttpsFileProvider) + fsm.addProvider("local", new DefaultLocalFileProvider) + fsm.addProvider("sftp", new SftpFileProvider) + fsm.addProvider("s3", new S3FileProvider) +// fsm.addProvider("webdav", new WebdavFileProvider) + fsm.addProvider("bzip2", new Bzip2FileProvider) + fsm.addProvider("gzip", new GzipFileProvider) + fsm.addProvider("jar", new JarFileProvider) + fsm.addProvider("ram", new RamFileProvider) + fsm.addProvider("res", new ResourceFileProvider) + fsm.addProvider("tar", new TarFileProvider) + fsm.addProvider("tmp", new TemporaryFileProvider) + fsm.addProvider("url", new UrlFileProvider) + fsm.addProvider("zip", new ZipFileProvider) + fsm.init() + fsm + } + + private val all = new AllFileSelector() + + + def mkdir(uri: String): Task[Unit] = + ZIO.attempt(file(uri).createFolder()) + + + def read(uri: String): Task[InputStream] = + ZIO.attempt(file(uri).getContent.getInputStream) + + + def read(uri: String, outputStream: OutputStream): Task[Unit] = + ZIO.attempt { + val content = file(uri).getContent + IOUtils.copy(content.getInputStream, outputStream) + content.close() + } + + + def readAsBytes(uri: String): Task[Array[Byte]] = + ZIO.attempt(FileUtil.getContent(file(uri))) + + + def write(uri: String, inputStream: InputStream): Task[Unit] = + ZIO.attempt { + val content = file(uri).getContent + IOUtils.copy(inputStream, content.getOutputStream) + content.close() + } + + + def copy(fromUri: String, toUri: String): Task[Unit] = + 
ZIO.attempt(file(toUri).copyFrom(file(fromUri), Selectors.SELECT_ALL)) + + + def move(fromUri: String, toUri: String): Task[Unit] = + ZIO.attempt(file(fromUri).moveTo(file(toUri))) + + + def duplicate(uri: String): Task[Unit] = + ZIO.attempt(file(duplicateName(file(uri))).copyFrom(file(uri), Selectors.SELECT_ALL)) + + + def delete(uri: String): Task[Unit] = + ZIO.attempt(if (file(uri).exists()) file(uri).delete()) + + + def exists(uri: String): Task[Boolean] = + ZIO.attempt(file(uri).exists()) + + + def info(uri: String): Task[HaranaFile] = + ZIO.attempt(toDataFile(file(uri))) + + + def list(uri: String): Task[List[HaranaFile]] = + ZIO.attempt(file(uri).getChildren.toList.map(toDataFile)) + + + def search(uri: String, query: String): Task[List[HaranaFile]] = { + val lowercaseQuery = query.toLowerCase + ZIO.attempt(file(uri).findFiles(all).toList.filter(_.getName.getBaseName.toLowerCase.contains(lowercaseQuery)).map(toDataFile)) + } + + def underlyingFile(uri: String): Task[File] = + ZIO.attempt(new File(file(uri).getName.getPath)) + + + def size(uri: String): Task[Long] = + ZIO.attempt(calculateSize(file(uri))) + + + def decompress(uri: String): Task[Unit] = + ZIO.attempt { + val outputDir = file(decompressName(file(uri))) + outputDir.createFolder() + new ZipFile(file(uri).getName.getPath).extractAll(outputDir.getName.getPath) + } + + + def compress(uri: String): Task[Unit] = + ZIO.attempt { + val inputFile = new File(file(uri).getName.getPath) + val outputFile = new File(file(compressName(file(uri), "zip")).getName.getPath) + if (file(uri).isFile) new ZipFile(outputFile).addFile(inputFile) else new ZipFile(outputFile).addFolder(inputFile) + } + + + def rename(uri: String, newName: String): Task[Option[Unit]] = { + val path = s"${file(uri).getParent}/$newName" + move(uri, s"${file(uri).getParent}/$newName").unless(file(path).exists()) + } + + + private def file(uri: String) = sourceManager.resolveFile(uri) + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vfs/Vfs.scala b/jvm/src/main/scala/com/harana/modules/vfs/Vfs.scala new file mode 100644 index 0000000..003c224 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vfs/Vfs.scala @@ -0,0 +1,29 @@ +package com.harana.modules.vfs + +import com.harana.sdk.shared.models.HaranaFile +import zio.Task +import zio.macros.accessible + +import java.io.{File, InputStream, OutputStream} + +@accessible +trait Vfs { + def read(uri: String): Task[InputStream] + def read(uri: String, outputStream: OutputStream): Task[Unit] + def readAsBytes(uri: String): Task[Array[Byte]] + def write(uri: String, inputStream: InputStream): Task[Unit] + def copy(fromUri: String, toUri: String): Task[Unit] + def move(fromUri: String, toUri: String): Task[Unit] + def info(uri: String): Task[HaranaFile] + def mkdir(uri: String): Task[Unit] + def delete(uri: String): Task[Unit] + def exists(uri: String): Task[Boolean] + def duplicate(uri: String): Task[Unit] + def underlyingFile(uri: String): Task[File] + def list(uri: String): Task[List[HaranaFile]] + def search(uri: String, query: String): Task[List[HaranaFile]] + def size(uri: String): Task[Long] + def decompress(uri: String): Task[Unit] + def compress(uri: String): Task[Unit] + def rename(uri: String, newName: String): Task[Option[Unit]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vfs/package.scala b/jvm/src/main/scala/com/harana/modules/vfs/package.scala new file mode 100644 index 0000000..e7179df --- /dev/null +++ 
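// Illustrative usage sketch (not part of the patch): calling the Vfs module above through the
// accessors generated by @accessible. The URIs are placeholders; any scheme registered in
// LiveVfs (s3, sftp, local, ...) should work, and LiveVfs.layer still needs the
// Config/Logger/Micrometer layers it is built from.
val backupReport: ZIO[Vfs, Throwable, Unit] =
  for {
    exists <- Vfs.exists("s3://bucket/reports/2023.csv")
    _      <- Vfs.copy("s3://bucket/reports/2023.csv", "local:///tmp/2023.csv").when(exists)
  } yield ()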
b/jvm/src/main/scala/com/harana/modules/vfs/package.scala @@ -0,0 +1,79 @@ +package com.harana.modules + +import com.harana.sdk.shared.models.HaranaFile +import org.apache.commons.lang3.StringUtils +import org.apache.commons.vfs2.{AllFileSelector, FileObject} + +import java.time.Instant + +package object vfs { + + def calculateSize(file: FileObject): Long = { + if (file.isFile) file.getContent.getSize + else file.findFiles(new AllFileSelector()).map(f => if (f.isFolder) 0 else f.getContent.getSize).sum + } + + + def toDataFile(file: FileObject) = { + HaranaFile( + name = file.getName.getBaseName, + path = file.getName.getPath, + extension = if (StringUtils.isEmpty(file.getName.getExtension)) None else Some(file.getName.getExtension), + isFolder = file.isFolder, + created = Instant.ofEpochMilli(file.getContent.getLastModifiedTime), + updated = Instant.ofEpochMilli(file.getContent.getLastModifiedTime), + size = calculateSize(file), + tags = List() + ) + } + + + def nameWithoutExtension(file: FileObject) = + file.getName.getBaseName.replace(s".${file.getName.getExtension}", "") + + + def decompressName(file: FileObject) = { + var newName = nameWithoutExtension(file) + + while (newName.equals(file.getName.getBaseName) || file.getParent.getChild(newName) != null) { + val index = newName.substring(newName.lastIndexOf(" ") + 1, newName.length()) + if (index.forall(Character.isDigit)) { + val prefix = newName.substring(0, newName.lastIndexOf(" ")) + newName = s"$prefix ${Integer.valueOf(index)+1}" + }else + newName = s"$newName 2" + } + s"${file.getParent.getName.getURI}/$newName" + } + + + def compressName(file: FileObject, format: String) = { + var newName = nameWithoutExtension(file) + while (file.getParent.getChild(s"$newName.$format") != null) { + val index = newName.substring(newName.lastIndexOf(" ") + 1, newName.length()) + if (index.forall(Character.isDigit)) { + val prefix = newName.substring(0, newName.lastIndexOf(" ")) + newName = s"$prefix ${Integer.valueOf(index)+1}" + }else + newName = s"$newName 2" + } + s"${file.getParent.getName.getURI}/$newName.$format" + } + + + def duplicateName(file: FileObject) = { + var newName = nameWithoutExtension(file) + val suffix = if (StringUtils.isBlank(file.getName.getExtension)) "" else s".${file.getName.getExtension}" + + while (newName.equals(file.getName.getBaseName) || file.getParent.getChild(s"$newName$suffix") != null) { + val index = newName.substring(newName.lastIndexOf(" ") + 1, newName.length()) + if (index.forall(Character.isDigit)) { + val prefix = newName.substring(0, newName.lastIndexOf(" ")) + newName = s"$prefix ${Integer.valueOf(index)+1}" + }else{ + newName = if (!newName.endsWith(" copy")) s"$newName copy" else s"$newName 2" + } + } + s"${file.getParent.getName.getURI}/$newName$suffix" + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/zendesk/LiveZendesk.scala b/jvm/src/main/scala/com/harana/modules/zendesk/LiveZendesk.scala new file mode 100644 index 0000000..7e81ab8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/zendesk/LiveZendesk.scala @@ -0,0 +1,1978 @@ +package com.harana.modules.zendesk + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.zendesk.LiveZendesk.clientRef +import com.harana.modules.zendesk.models.ZendeskError +import org.zendesk.client.v2.Zendesk.Builder +import org.zendesk.client.v2.{Zendesk => CoreZendesk} +import 
org.zendesk.client.v2.model._ +import org.zendesk.client.v2.model.hc._ +import org.zendesk.client.v2.model.schedules.{Holiday, Schedule} +import org.zendesk.client.v2.model.targets.Target +import zio.{IO, ZIO, ZLayer} + +import java.io.File +import java.util.Date +import java.util.concurrent.atomic.AtomicReference +import scala.jdk.CollectionConverters._ + +object LiveZendesk { + private val clientRef = new AtomicReference[Option[CoreZendesk]](None) + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveZendesk(config, logger, micrometer) + } +} + +case class LiveZendesk(config: Config, logger: Logger, micrometer: Micrometer) extends Zendesk { + + private def client = + for { + client <- if (clientRef.get.nonEmpty) ZIO.succeed(clientRef.get.get) else + for { + url <- config.string("zendesk.url") + username <- config.secret("zendesk-username") + password <- config.secret("zendesk-password") + oauthToken <- config.secret("zendesk-oauth-token") + token <- config.secret("zendesk-token") + } yield { + new Builder(url) + .setUsername(username) + .setPassword(password) + .setOauthToken(oauthToken) + .setToken(token) + .build() + } + _ = clientRef.set(Some(client)) + } yield client + + + def getBrands: IO[ZendeskError, List[Brand]] = + for { + c <- client + r <- ZIO.attempt(c.getBrands) + } yield r + + + def getTicketForm(id: Long): IO[ZendeskError, TicketForm] = + for { + c <- client + r <- ZIO.attempt(c.getTicketForm(id)) + } yield r + + + def getTicketForms: IO[ZendeskError, List[TicketForm]] = + for { + c <- client + r <- ZIO.attempt(c.getTicketForms) + } yield r + + + def createTicketForm(ticketForm: TicketForm): IO[ZendeskError, TicketForm] = + for { + c <- client + r <- ZIO.attempt(c.createTicketForm(ticketForm)) + } yield r + + + def importTicket(ticketImport: TicketImport): IO[ZendeskError, Ticket] = + for { + c <- client + r <- ZIO.attempt(c.importTicket(ticketImport)) + } yield r + + + def importTickets(ticketImports: List[TicketImport]): IO[ZendeskError, List[Ticket]] = + for { + c <- client + r <- ZIO.attempt(c.importTicketsAsync(ticketImports)) + } yield r + + + def getRecentTickets: IO[ZendeskError, List[Ticket]] = + for { + c <- client + r <- ZIO.attempt(c.getRecentTickets) + } yield r + + + def getTickets: IO[ZendeskError, List[Ticket]] = + for { + c <- client + r <- ZIO.attempt(c.getTickets) + } yield r + + + def getTicketsIncrementally(startDate: Date, endDate: Date): IO[ZendeskError, List[Ticket]] = + for { + c <- client + r <- ZIO.attempt(c.getTicketsIncrementally(startDate, endDate)) + } yield r + + + def getTicketsByExternalId(externalId: String, includeArchived: Boolean): IO[ZendeskError, List[Ticket]] = + for { + c <- client + r <- ZIO.attempt(c.getTicketsByExternalId(externalId, includeArchived)) + } yield r + + + def getTicketsFromSearch(searchTerm: String): IO[ZendeskError, List[Ticket]] = + for { + c <- client + r <- ZIO.attempt(c.getTicketsFromSearch(searchTerm)) + } yield r + + + def getTicket(ticketId: Long): IO[ZendeskError, Ticket] = + for { + c <- client + r <- ZIO.attempt(c.getTicket(ticketId)) + } yield r + + + def getTickets(ticketIds: List[Long]): IO[ZendeskError, List[Ticket]] = + ticketIds.headOption match { + case None => ZIO.attempt(List()) + case Some(x) => + for { + c <- client + r <- ZIO.attempt(c.getTickets(x, ticketIds.drop(1): _*)) + } yield r + } + + + def getTicketIncidents(ticketId: Long): IO[ZendeskError, List[Ticket]] = + for { + c <- client + r <- 
ZIO.attempt(c.getTicketIncidents(ticketId)) + } yield r + + + def getTicketCollaborators(ticketId: Long): IO[ZendeskError, List[User]] = + for { + c <- client + r <- ZIO.attempt(c.getTicketCollaborators(ticketId)) + } yield r + + + def getOrganizationTickets(organizationId: Long): IO[ZendeskError, List[Ticket]] = + for { + c <- client + r <- ZIO.attempt(c.getOrganizationTickets(organizationId)) + } yield r + + + def getUserRequestedTickets(userId: Long): IO[ZendeskError, List[Ticket]] = + for { + c <- client + r <- ZIO.attempt(c.getUserRequestedTickets(userId)) + } yield r + + + def permanentlyDeleteTicket(ticket: Ticket): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.permanentlyDeleteTicket(ticket)).map(_ => ()) + } yield r + + + def permanentlyDeleteTicket(ticketId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.permanentlyDeleteTicket(ticketId)).map(_ => ()) + } yield r + + + def deleteTicket(ticket: Ticket): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteTicket(ticket)) + } yield r + + + def deleteTicket(ticketId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteTicket(ticketId)) + } yield r + + + def createTicket(ticket: Ticket): IO[ZendeskError, Ticket] = + for { + c <- client + r <- ZIO.attempt(c.createTicketAsync(ticket)) + } yield r + + + def createTickets(tickets: List[Ticket]): IO[ZendeskError, List[Ticket]] = + for { + c <- client + r <- ZIO.attempt(c.createTicketsAsync(tickets)) + } yield r + + + def updateTicket(ticket: Ticket): IO[ZendeskError, Ticket] = + for { + c <- client + r <- ZIO.attempt(c.updateTicket(ticket)) + } yield r + + + def updateTickets(tickets: List[Ticket]): IO[ZendeskError, List[Ticket]] = + for { + c <- client + r <- ZIO.attempt(c.updateTicketsAsync(tickets)) + } yield r + + + def markTicketAsSpam(ticket: Ticket): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.markTicketAsSpam(ticket)) + } yield r + + + def deleteTickets(ticketIds: List[Long]): IO[ZendeskError, Unit] = + ticketIds.headOption match { + case None => ZIO.unit + case Some(x) => + for { + c <- client + r <- ZIO.attempt(c.deleteTickets(x, ticketIds.drop(1): _*)).mapError(handleException) + } yield r + } + + + def permanentlyDeleteTickets(ticketIds: List[Long]): IO[ZendeskError, Unit] = + ticketIds.headOption match { + case None => ZIO.unit + case Some(x) => + for { + c <- client + r <- ZIO.attempt(c.permanentlyDeleteTickets(x, ticketIds.drop(1): _*)).mapBoth(handleException, _ => ()) + } yield r + } + + + def getComplianceDeletionStatuses(userId: Long): IO[ZendeskError, List[ComplianceDeletionStatus]] = + for { + c <- client + r <- ZIO.attempt(c.getComplianceDeletionStatuses(userId)) + } yield r + + + def getUserCCDTickets(userId: Long): IO[ZendeskError, List[Ticket]] = + for { + c <- client + r <- ZIO.attempt(c.getUserCCDTickets(userId)) + } yield r + + + def getUserRelatedInfo(userId: Long): IO[ZendeskError, UserRelatedInfo] = + for { + c <- client + r <- ZIO.attempt(c.getUserRelatedInfo(userId)) + } yield r + + + def getTicketMetrics: IO[ZendeskError, List[Metric]] = + for { + c <- client + r <- ZIO.attempt(c.getTicketMetrics) + } yield r + + + def getTicketMetricByTicket(id: Long): IO[ZendeskError, Metric] = + for { + c <- client + r <- ZIO.attempt(c.getTicketMetricByTicket(id)) + } yield r + + + def getTicketMetric(id: Long): IO[ZendeskError, Metric] = + for { + c <- client + r <- ZIO.attempt(c.getTicketMetric(id)) + } yield r + + + def getTicketAudits(ticket: 
Ticket): IO[ZendeskError, List[Audit]] = + for { + c <- client + r <- ZIO.attempt(c.getTicketAudits(ticket)) + } yield r + + + def getTicketAudits(id: Long): IO[ZendeskError, List[Audit]] = + for { + c <- client + r <- ZIO.attempt(c.getTicketAudits(id)) + } yield r + + + def getTicketAudit(ticket: Ticket, audit: Audit): IO[ZendeskError, Audit] = + for { + c <- client + r <- ZIO.attempt(c.getTicketAudit(ticket, audit)) + } yield r + + + def getTicketAudit(ticket: Ticket, auditId: Long): IO[ZendeskError, Audit] = + for { + c <- client + r <- ZIO.attempt(c.getTicketAudit(ticket, auditId)) + } yield r + + + def getTicketAudit(ticketId: Long, auditId: Long): IO[ZendeskError, Audit] = + for { + c <- client + r <- ZIO.attempt(c.getTicketAudit(ticketId, auditId)) + } yield r + + + def trustTicketAudit(ticket: Ticket, audit: Audit): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.trustTicketAudit(ticket, audit)) + } yield r + + + def trustTicketAudit(ticket: Ticket, auditId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.trustTicketAudit(ticket, auditId)) + } yield r + + + def trustTicketAudit(ticketId: Long, auditId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.trustTicketAudit(ticketId, auditId)) + } yield r + + + def makePrivateTicketAudit(ticket: Ticket, audit: Audit): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.makePrivateTicketAudit(ticket, audit)) + } yield r + + + def makePrivateTicketAudit(ticket: Ticket, auditId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.makePrivateTicketAudit(ticket, auditId)) + } yield r + + + def makePrivateTicketAudit(ticketId: Long, auditId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.makePrivateTicketAudit(ticketId, auditId)) + } yield r + + + def getTicketFields: IO[ZendeskError, List[Field]] = + for { + c <- client + r <- ZIO.attempt(c.getTicketFields) + } yield r + + + def getTicketField(ticketFieldId: Long): IO[ZendeskError, Field] = + for { + c <- client + r <- ZIO.attempt(c.getTicketField(ticketFieldId)) + } yield r + + + def createTicketField(field: Field): IO[ZendeskError, Field] = + for { + c <- client + r <- ZIO.attempt(c.createTicketField(field)) + } yield r + + + def updateTicketField(field: Field): IO[ZendeskError, Field] = + for { + c <- client + r <- ZIO.attempt(c.updateTicketField(field)) + } yield r + + + def deleteTicketField(field: Field): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteTicketField(field)) + } yield r + + + def deleteTicketField(ticketFieldId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteTicketField(ticketFieldId)) + } yield r + + + def getSuspendedTickets: IO[ZendeskError, List[SuspendedTicket]] = + for { + c <- client + r <- ZIO.attempt(c.getSuspendedTickets) + } yield r + + + def deleteSuspendedTicket(ticket: SuspendedTicket): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteSuspendedTicket(ticket)) + } yield r + + + def deleteSuspendedTicket(ticketId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteSuspendedTicket(ticketId)) + } yield r + + + def createUpload(fileName: String, content: Array[Byte]): IO[ZendeskError, Attachment.Upload] = + for { + c <- client + r <- ZIO.attempt(c.createUpload(fileName, content)) + } yield r + + + def createUpload(fileName: String, contentType: String, content: Array[Byte]): IO[ZendeskError, Attachment.Upload] = + 
for { + c <- client + r <- ZIO.attempt(c.createUpload(fileName, contentType, content)) + } yield r + + + def createUpload(token: String, fileName: String, contentType: String, content: Array[Byte]): IO[ZendeskError, Attachment.Upload] = + for { + c <- client + r <- ZIO.attempt(c.createUpload(token, fileName, contentType, content)) + } yield r + + + def deleteUpload(upload: Attachment.Upload): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteUpload(upload)) + } yield r + + + def deleteUpload(token: String): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteUpload(token)) + } yield r + + + def getArticlesFromSearch(searchTerm: String): IO[ZendeskError, List[Article]] = + for { + c <- client + r <- ZIO.attempt(c.getArticleFromSearch(searchTerm)) + } yield r + + + def getArticlesFromSearch(searchTerm: String, sectionId: Long): IO[ZendeskError, List[Article]] = + for { + c <- client + r <- ZIO.attempt(c.getArticleFromSearch(searchTerm, sectionId)) + } yield r + + + def getAttachmentsFromArticle(articleId: Long): IO[ZendeskError, List[ArticleAttachments]] = + for { + c <- client + r <- ZIO.attempt(c.getAttachmentsFromArticle(articleId)) + } yield r + + + def associateAttachmentsToArticle(articleId: Long, attachment: List[Attachment]): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.associateAttachmentsToArticle(articleId.toString, attachment)) + } yield r + + + def createUploadArticle(articleId: Long, file: File, inline: Boolean = false): IO[ZendeskError, ArticleAttachments] = + for { + c <- client + r <- ZIO.attempt(c.createUploadArticle(articleId, file, inline)) + } yield r + + + def getAttachment(attachment: Attachment): IO[ZendeskError, Attachment] = + for { + c <- client + r <- ZIO.attempt(c.getAttachment(attachment)) + } yield r + + + def getAttachment(attachmentId: Long): IO[ZendeskError, Attachment] = + for { + c <- client + r <- ZIO.attempt(c.getAttachment(attachmentId)) + } yield r + + + def deleteAttachment(attachmentId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteAttachment(attachmentId)) + } yield r + + + def getTargets: IO[ZendeskError, List[Target]] = + for { + c <- client + r <- ZIO.attempt(c.getTargets) + } yield r + + + def getTarget(targetId: Long): IO[ZendeskError, Target] = + for { + c <- client + r <- ZIO.attempt(c.getTarget(targetId)) + } yield r + + + def createTarget(target: Target): IO[ZendeskError, Target] = + for { + c <- client + r <- ZIO.attempt(c.createTarget(target: Target)) + } yield r + + + def deleteTarget(targetId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteTarget(targetId)) + } yield r + + + def getTriggers: IO[ZendeskError, List[Trigger]] = + for { + c <- client + r <- ZIO.attempt(c.getTriggers) + } yield r + + + def getTrigger(triggerId: Long): IO[ZendeskError, Trigger] = + for { + c <- client + r <- ZIO.attempt(c.getTrigger(triggerId)) + } yield r + + + def createTrigger(trigger: Trigger): IO[ZendeskError, Trigger] = + for { + c <- client + r <- ZIO.attempt(c.createTrigger(trigger)) + } yield r + + + def updateTrigger(triggerId: Long, trigger: Trigger): IO[ZendeskError, Trigger] = + for { + c <- client + r <- ZIO.attempt(c.updateTrigger(triggerId, trigger)) + } yield r + + + def deleteTrigger(triggerId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteTrigger(triggerId)) + } yield r + + + def getAutomations: IO[ZendeskError, List[Automation]] = + for { + c <- client + r <- 
ZIO.attempt(c.getAutomations) + } yield r + + + def getAutomation(id: Long): IO[ZendeskError, Automation] = + for { + c <- client + r <- ZIO.attempt(c.getAutomation(id)) + } yield r + + + def createAutomation(automation: Automation): IO[ZendeskError, Automation] = + for { + c <- client + r <- ZIO.attempt(c.createAutomation(automation)) + } yield r + + + def updateAutomation(automationId: Long, automation: Automation): IO[ZendeskError, Automation] = + for { + c <- client + r <- ZIO.attempt(c.updateAutomation(automationId, automation)) + } yield r + + + def deleteAutomation(automationId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteAutomation(automationId)) + } yield r + + + def getTwitterMonitors: IO[ZendeskError, List[TwitterMonitor]] = + for { + c <- client + r <- ZIO.attempt(c.getTwitterMonitors) + } yield r + + + def getUsers: IO[ZendeskError, List[User]] = + for { + c <- client + r <- ZIO.attempt(c.getUsers) + } yield r + + + def getUsersByRoles(roles: List[String]): IO[ZendeskError, List[User]] = + roles.headOption match { + case None => ZIO.attempt(List()) + case Some(x) => + for { + c <- client + r <- ZIO.attempt(c.getUsersByRole(x, roles.drop(1): _*)) + } yield r + } + + + def getUsersIncrementally(startTime: Date): IO[ZendeskError, List[User]] = + for { + c <- client + r <- ZIO.attempt(c.getUsersIncrementally(startTime)) + } yield r + + + def getGroupUsers(groupId: Long): IO[ZendeskError, List[User]] = + for { + c <- client + r <- ZIO.attempt(c.getGroupUsers(groupId)) + } yield r + + + def getOrganizationUsers(organizationId: Long): IO[ZendeskError, List[User]] = + for { + c <- client + r <- ZIO.attempt(c.getOrganizationUsers(organizationId)) + } yield r + + + def getUser(userId: Long): IO[ZendeskError, User] = + for { + c <- client + r <- ZIO.attempt(c.getUser(userId)) + } yield r + + + def getAuthenticatedUser: IO[ZendeskError, User] = + for { + c <- client + r <- ZIO.attempt(c.getAuthenticatedUser) + } yield r + + + def getUserFields: IO[ZendeskError, List[UserField]] = + for { + c <- client + r <- ZIO.attempt(c.getUserFields) + } yield r + + + def createUser(user: User): IO[ZendeskError, User] = + for { + c <- client + r <- ZIO.attempt(c.createUser(user)) + } yield r + + + def createUsers(users: List[User]): IO[ZendeskError, List[User]] = + for { + c <- client + r <- ZIO.attempt(c.createUsersAsync(users)) + } yield r + + + def createOrUpdateUser(user: User): IO[ZendeskError, User] = + for { + c <- client + r <- ZIO.attempt(c.createOrUpdateUser(user)) + } yield r + + + def updateUser(user: User): IO[ZendeskError, User] = + for { + c <- client + r <- ZIO.attempt(c.updateUser(user)) + } yield r + + + def deleteUser(user: User): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteUser(user)) + } yield r + + + def deleteUser(userId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteUser(userId)) + } yield r + + + def permanentlyDeleteUser(user: User): IO[ZendeskError, User] = + for { + c <- client + r <- ZIO.attempt(c.permanentlyDeleteUser(user)) + } yield r + + + def permanentlyDeleteUser(userId: Long): IO[ZendeskError, User] = + for { + c <- client + r <- ZIO.attempt(c.permanentlyDeleteUser(userId)) + } yield r + + + def suspendUser(userId: Long): IO[ZendeskError, User] = + for { + c <- client + r <- ZIO.attempt(c.suspendUser(userId)) + } yield r + + + def unsuspendUser(userId: Long): IO[ZendeskError, User] = + for { + c <- client + r <- ZIO.attempt(c.unsuspendUser(userId)) + } yield r + + + 
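// Illustrative usage sketch (not part of the patch): driving the Zendesk module from a ZIO
// program. The email address is a placeholder, and LiveZendesk.layer still needs the
// Config/Logger/Micrometer layers it is built from.
val suspendByEmail: ZIO[Zendesk, ZendeskError, Unit] =
  for {
    zendesk <- ZIO.service[Zendesk]
    users   <- zendesk.lookupUserByEmail("user@example.com")
    _       <- ZIO.foreachDiscard(users)(user => zendesk.suspendUser(user.getId))
  } yield ()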
def lookupUserByEmail(email: String): IO[ZendeskError, List[User]] = + for { + c <- client + r <- ZIO.attempt(c.lookupUserByEmail(email)) + } yield r + + + def lookupUserByExternalId(externalId: String): IO[ZendeskError, List[User]] = + for { + c <- client + r <- ZIO.attempt(c.lookupUserByExternalId(externalId)) + } yield r + + + def getCurrentUser: IO[ZendeskError, User] = + for { + c <- client + r <- ZIO.attempt(c.getCurrentUser) + } yield r + + + def resetUserPassword(user: User, password: String): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.resetUserPassword(user, password)) + } yield r + + + def resetUserPassword(userId: Long, password: String): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.resetUserPassword(userId, password)) + } yield r + + + def changeUserPassword(user: User, oldPassword: String, newPassword: String): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.changeUserPassword(user, oldPassword, newPassword)) + } yield r + + + def getUserIdentities(user: User): IO[ZendeskError, List[Identity]] = + for { + c <- client + r <- ZIO.attempt(c.getUserIdentities(user)) + } yield r + + + def getUserIdentities(userId: Long): IO[ZendeskError, List[Identity]] = + for { + c <- client + r <- ZIO.attempt(c.getUserIdentities(userId)) + } yield r + + + def getUserIdentity(user: User, identity: Identity): IO[ZendeskError, Identity] = + for { + c <- client + r <- ZIO.attempt(c.getUserIdentity(user, identity)) + } yield r + + + def getUserIdentity(user: User, identityId: Long): IO[ZendeskError, Identity] = + for { + c <- client + r <- ZIO.attempt(c.getUserIdentity(user, identityId)) + } yield r + + + def getUserIdentity(userId: Long, identityId: Long): IO[ZendeskError, Identity] = + for { + c <- client + r <- ZIO.attempt(c.getUserIdentity(userId, identityId)) + } yield r + + + def setUserPrimaryIdentity(user: User, identity: Identity): IO[ZendeskError, List[Identity]] = + for { + c <- client + r <- ZIO.attempt(c.setUserPrimaryIdentity(user, identity)) + } yield r + + + def setUserPrimaryIdentity(user: User, identityId: Long): IO[ZendeskError, List[Identity]] = + for { + c <- client + r <- ZIO.attempt(c.setUserPrimaryIdentity(user, identityId)) + } yield r + + + def setUserPrimaryIdentity(userId: Long, identityId: Long): IO[ZendeskError, List[Identity]] = + for { + c <- client + r <- ZIO.attempt(c.setUserPrimaryIdentity(userId, identityId)) + } yield r + + + def verifyUserIdentity(user: User, identity: Identity): IO[ZendeskError, Identity] = + for { + c <- client + r <- ZIO.attempt(c.verifyUserIdentity(user, identity)) + } yield r + + + def verifyUserIdentity(user: User, identityId: Long): IO[ZendeskError, Identity] = + for { + c <- client + r <- ZIO.attempt(c.verifyUserIdentity(user, identityId)) + } yield r + + + def verifyUserIdentity(userId: Long, identityId: Long): IO[ZendeskError, Identity] = + for { + c <- client + r <- ZIO.attempt(c.verifyUserIdentity(userId, identityId)) + } yield r + + + def requestVerifyUserIdentity(user: User, identity: Identity): IO[ZendeskError, Identity] = + for { + c <- client + r <- ZIO.attempt(c.requestVerifyUserIdentity(user, identity)) + } yield r + + + def requestVerifyUserIdentity(user: User, identityId: Long): IO[ZendeskError, Identity] = + for { + c <- client + r <- ZIO.attempt(c.requestVerifyUserIdentity(user, identityId)) + } yield r + + + def requestVerifyUserIdentity(userId: Long, identityId: Long): IO[ZendeskError, Identity] = + for { + c <- client + r <- 
ZIO.attempt(c.requestVerifyUserIdentity(userId, identityId)) + } yield r + + + def updateUserIdentity(userId: Long, identity: Identity): IO[ZendeskError, Identity] = + for { + c <- client + r <- ZIO.attempt(c.updateUserIdentity(userId, identity)) + } yield r + + + def updateUserIdentity(user: User, identity: Identity): IO[ZendeskError, Identity] = + for { + c <- client + r <- ZIO.attempt(c.updateUserIdentity(user, identity)) + } yield r + + + def deleteUserIdentity(user: User, identity: Identity): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteUserIdentity(user, identity)) + } yield r + + + def deleteUserIdentity(user: User, identityId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteUserIdentity(user, identityId)) + } yield r + + + def deleteUserIdentity(userId: Long, identityId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteUserIdentity(userId, identityId)) + } yield r + + + def createUserIdentity(userId: Long, identity: Identity): IO[ZendeskError, Identity] = + for { + c <- client + r <- ZIO.attempt(c.createUserIdentity(userId, identity)) + } yield r + + + def createUserIdentity(user: User, identity: Identity): IO[ZendeskError, Identity] = + for { + c <- client + r <- ZIO.attempt(c.createUserIdentity(user, identity)) + } yield r + + + def getCustomAgentRoles: IO[ZendeskError, List[AgentRole]] = + for { + c <- client + r <- ZIO.attempt(c.getCustomAgentRoles) + } yield r + + + def getRequests: IO[ZendeskError, List[Request]] = + for { + c <- client + r <- ZIO.attempt(c.getRequests) + } yield r + + + def getOpenRequests: IO[ZendeskError, List[Request]] = + for { + c <- client + r <- ZIO.attempt(c.getOpenRequests) + } yield r + + + def getSolvedRequests: IO[ZendeskError, List[Request]] = + for { + c <- client + r <- ZIO.attempt(c.getSolvedRequests) + } yield r + + + def getCCRequests: IO[ZendeskError, List[Request]] = + for { + c <- client + r <- ZIO.attempt(c.getCCRequests) + } yield r + + + def getUserRequests(user: User): IO[ZendeskError, List[Request]] = + for { + c <- client + r <- ZIO.attempt(c.getUserRequests(user)) + } yield r + + + def getUserRequests(id: Long): IO[ZendeskError, List[Request]] = + for { + c <- client + r <- ZIO.attempt(c.getUserRequests(id)) + } yield r + + + def getRequest(id: Long): IO[ZendeskError, Request] = + for { + c <- client + r <- ZIO.attempt(c.getRequest(id)) + } yield r + + + def createRequest(request: Request): IO[ZendeskError, Request] = + for { + c <- client + r <- ZIO.attempt(c.createRequest(request)) + } yield r + + + def updateRequest(request: Request): IO[ZendeskError, Request] = + for { + c <- client + r <- ZIO.attempt(c.updateRequest(request)) + } yield r + + + def getRequestComments(request: Request): IO[ZendeskError, List[Comment]] = + for { + c <- client + r <- ZIO.attempt(c.getRequestComments(request)) + } yield r + + + def getRequestComments(id: Long): IO[ZendeskError, List[Comment]] = + for { + c <- client + r <- ZIO.attempt(c.getRequestComments(id)) + } yield r + + + def getTicketComments(id: Long): IO[ZendeskError, List[Comment]] = + for { + c <- client + r <- ZIO.attempt(c.getTicketComments(id)) + } yield r + + + def getRequestComment(request: Request, comment: Comment): IO[ZendeskError, Comment] = + for { + c <- client + r <- ZIO.attempt(c.getRequestComment(request, comment)) + } yield r + + + def getRequestComment(request: Request, commentId: Long): IO[ZendeskError, Comment] = + for { + c <- client + r <- 
ZIO.attempt(c.getRequestComment(request, commentId)) + } yield r + + + def getRequestComment(requestId: Long, commentId: Long): IO[ZendeskError, Comment] = + for { + c <- client + r <- ZIO.attempt(c.getRequestComment(requestId, commentId)) + } yield r + + + def createComment(ticketId: Long, comment: Comment): IO[ZendeskError, Ticket] = + for { + c <- client + r <- ZIO.attempt(c.createComment(ticketId, comment)) + } yield r + + + def createTicketFromTweet(tweetId: Long, monitorId: Long): IO[ZendeskError, Ticket] = + for { + c <- client + r <- ZIO.attempt(c.createTicketFromTweet(tweetId, monitorId)) + } yield r + + + def getOrganizations: IO[ZendeskError, List[Organization]] = + for { + c <- client + r <- ZIO.attempt(c.getOrganizations) + } yield r + + + def getOrganizationsIncrementally(startTime: Date): IO[ZendeskError, List[Organization]] = + for { + c <- client + r <- ZIO.attempt(c.getOrganizationsIncrementally(startTime)) + } yield r + + + def getOrganizationFields: IO[ZendeskError, List[OrganizationField]] = + for { + c <- client + r <- ZIO.attempt(c.getOrganizationFields) + } yield r + + + def getAutoCompleteOrganizations(name: String): IO[ZendeskError, List[Organization]] = + for { + c <- client + r <- ZIO.attempt(c.getAutoCompleteOrganizations(name)) + } yield r + + + def getOrganization(id: Long): IO[ZendeskError, Organization] = + for { + c <- client + r <- ZIO.attempt(c.getOrganization(id)) + } yield r + + + def createOrganization(organization: Organization): IO[ZendeskError, Organization] = + for { + c <- client + r <- ZIO.attempt(c.createOrganization(organization)) + } yield r + + + def createOrganizations(organizations: List[Organization]): IO[ZendeskError, List[Organization]] = + for { + c <- client + r <- ZIO.attempt(c.createOrganizationsAsync(organizations)) + } yield r + + + def updateOrganization(organization: Organization): IO[ZendeskError, Organization] = + for { + c <- client + r <- ZIO.attempt(c.updateOrganization(organization)) + } yield r + + + def deleteOrganization(organization: Organization): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteOrganization(organization)) + } yield r + + + def deleteOrganization(id: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteOrganization(id)) + } yield r + + + def lookupOrganizationsByExternalId(externalId: String): IO[ZendeskError, List[Organization]] = + for { + c <- client + r <- ZIO.attempt(c.lookupOrganizationsByExternalId(externalId)) + } yield r + + + def getOrganizationMemberships: IO[ZendeskError, List[OrganizationMembership]] = + for { + c <- client + r <- ZIO.attempt(c.getOrganizationMemberships) + } yield r + + + def getOrganizationMembershipsForOrg(organizationId: Long): IO[ZendeskError, List[OrganizationMembership]] = + for { + c <- client + r <- ZIO.attempt(c.getOrganizationMembershipsForOrg(organizationId)) + } yield r + + + def getOrganizationMembershipsForUser(userId: Long): IO[ZendeskError, List[OrganizationMembership]] = + for { + c <- client + r <- ZIO.attempt(c.getOrganizationMembershipsForUser(userId)) + } yield r + + + def getOrganizationMembershipForUser(userId: Long, id: Long): IO[ZendeskError, OrganizationMembership] = + for { + c <- client + r <- ZIO.attempt(c.getOrganizationMembershipForUser(userId, id)) + } yield r + + + def getOrganizationMembership(id: Long): IO[ZendeskError, OrganizationMembership] = + for { + c <- client + r <- ZIO.attempt(c.getOrganizationMembership(id)) + } yield r + + + def 
createOrganizationMembership(organizationMembership: OrganizationMembership): IO[ZendeskError, OrganizationMembership] = + for { + c <- client + r <- ZIO.attempt(c.createOrganizationMembership(organizationMembership: OrganizationMembership)) + } yield r + + + def deleteOrganizationMembership(id: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteOrganizationMembership(id)) + } yield r + + + def getGroups: IO[ZendeskError, List[Group]] = + for { + c <- client + r <- ZIO.attempt(c.getGroups) + } yield r + + + def getAssignableGroups: IO[ZendeskError, List[Group]] = + for { + c <- client + r <- ZIO.attempt(c.getAssignableGroups) + } yield r + + + def getGroup(groupId: Long): IO[ZendeskError, Group] = + for { + c <- client + r <- ZIO.attempt(c.getGroup(groupId)) + } yield r + + + def createGroup(group: Group): IO[ZendeskError, Group] = + for { + c <- client + r <- ZIO.attempt(c.createGroup(group)) + } yield r + + + def updateGroup(group: Group): IO[ZendeskError, Group] = + for { + c <- client + r <- ZIO.attempt(c.updateGroup(group)) + } yield r + + + def deleteGroup(group: Group): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteGroup(group)) + } yield r + + + def deleteGroup(groupId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteGroup(groupId)) + } yield r + + + def getMacros: IO[ZendeskError, List[Macro]] = + for { + c <- client + r <- ZIO.attempt(c.getMacros) + } yield r + + + def getMacro(macroId: Long): IO[ZendeskError, Macro] = + for { + c <- client + r <- ZIO.attempt(c.getMacro(macroId)) + } yield r + + + def createMacro(`macro`: Macro): IO[ZendeskError, Macro] = + for { + c <- client + r <- ZIO.attempt(c.createMacro(`macro`)) + } yield r + + + def updateMacro(macroId: Long, `macro`: Macro): IO[ZendeskError, Macro] = + for { + c <- client + r <- ZIO.attempt(c.updateMacro(macroId, `macro`)) + } yield r + + + def macrosShowChangesToTicket(macroId: Long): IO[ZendeskError, Ticket] = + for { + c <- client + r <- ZIO.attempt(c.macrosShowChangesToTicket(macroId)) + } yield r + + + def macrosShowTicketAfterChanges(ticketId: Long, macroId: Long): IO[ZendeskError, Ticket] = + for { + c <- client + r <- ZIO.attempt(c.macrosShowTicketAfterChanges(ticketId, macroId)) + } yield r + + + def addTagToTicket(ticketId: Long, tags: List[String]): IO[ZendeskError, List[String]] = + for { + c <- client + r <- ZIO.attempt(c.addTagToTicket(ticketId, tags: _*)) + } yield r + + + def addTagToTopic(topicId: Long, tags: List[String]): IO[ZendeskError, List[String]] = + for { + c <- client + r <- ZIO.attempt(c.addTagToTopics(topicId, tags: _*)) + } yield r + + + def addTagToOrganization(organizationId: Long, tags: List[String]): IO[ZendeskError, List[String]] = + for { + c <- client + r <- ZIO.attempt(c.addTagToOrganisations(organizationId, tags: _*)) + } yield r + + + def setTagOnTicket(tagId: Long, tags: List[String]): IO[ZendeskError, List[String]] = + for { + c <- client + r <- ZIO.attempt(c.setTagOnTicket(tagId, tags: _*)) + } yield r + + + def setTagOnTopic(topicId: Long, tags: List[String]): IO[ZendeskError, List[String]] = + for { + c <- client + r <- ZIO.attempt(c.setTagOnTopics(topicId, tags: _*)) + } yield r + + + def setTagOnOrganisation(organizationId: Long, tags: List[String]): IO[ZendeskError, List[String]] = + for { + c <- client + r <- ZIO.attempt(c.setTagOnOrganisations(organizationId, tags: _*)) + } yield r + + + def removeTagFromTicket(ticketId: Long, tags: List[String]): IO[ZendeskError, List[String]] = + 
for { + c <- client + r <- ZIO.attempt(c.removeTagFromTicket(ticketId, tags: _*)) + } yield r + + + def removeTagFromTopic(topicId: Long, tags: List[String]): IO[ZendeskError, List[String]] = + for { + c <- client + r <- ZIO.attempt(c.removeTagFromTopics(topicId, tags: _*)) + } yield r + + + def removeTagFromOrganisation(organizationId: Long, tags: List[String]): IO[ZendeskError, List[String]] = + for { + c <- client + r <- ZIO.attempt(c.removeTagFromOrganisations(organizationId, tags: _*)) + } yield r + + + def getIncrementalTicketsResult(unixEpochTime: Long): IO[ZendeskError, Map[Any, Any]] = + for { + c <- client + r <- ZIO.attempt(c.getIncrementalTicketsResult(unixEpochTime).asScala.toMap[Any, Any]).mapError(handleException) + } yield r + + + def getGroupMemberships: IO[ZendeskError, List[GroupMembership]] = + for { + c <- client + r <- ZIO.attempt(c.getGroupMemberships) + } yield r + + + def getGroupMembershipByUser(userId: Long): IO[ZendeskError, List[GroupMembership]] = + for { + c <- client + r <- ZIO.attempt(c.getGroupMembershipByUser(userId)) + } yield r + + + def getGroupMemberships(groupId: Long): IO[ZendeskError, List[GroupMembership]] = + for { + c <- client + r <- ZIO.attempt(c.getGroupMemberships(groupId)) + } yield r + + + def getAssignableGroupMemberships: IO[ZendeskError, List[GroupMembership]] = + for { + c <- client + r <- ZIO.attempt(c.getAssignableGroupMemberships) + } yield r + + + def getAssignableGroupMemberships(groupId: Long): IO[ZendeskError, List[GroupMembership]] = + for { + c <- client + r <- ZIO.attempt(c.getAssignableGroupMemberships(groupId)) + } yield r + + + def getGroupMembership(groupMembershipId: Long): IO[ZendeskError, GroupMembership] = + for { + c <- client + r <- ZIO.attempt(c.getGroupMembership(groupMembershipId)) + } yield r + + + def getGroupMembership(userId: Long, groupMembershipId: Long): IO[ZendeskError, GroupMembership] = + for { + c <- client + r <- ZIO.attempt(c.getGroupMembership(userId, groupMembershipId)) + } yield r + + + def createGroupMembership(groupMembership: GroupMembership): IO[ZendeskError, GroupMembership] = + for { + c <- client + r <- ZIO.attempt(c.createGroupMembership(groupMembership: GroupMembership)) + } yield r + + + def createGroupMembership(userId: Long, groupMembership: GroupMembership): IO[ZendeskError, GroupMembership] = + for { + c <- client + r <- ZIO.attempt(c.createGroupMembership(userId, groupMembership)) + } yield r + + + def deleteGroupMembership(groupMembership: GroupMembership): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteGroupMembership(groupMembership)) + } yield r + + + def deleteGroupMembership(id: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteGroupMembership(id)) + } yield r + + + def deleteGroupMembership(userId: Long, groupMembership: GroupMembership): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteGroupMembership(userId, groupMembership)) + } yield r + + + def deleteGroupMembership(userId: Long, groupMembershipId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteGroupMembership(userId, groupMembershipId)) + } yield r + + + def setGroupMembershipAsDefault(userId: Long, groupMembership: GroupMembership): IO[ZendeskError, List[GroupMembership]] = + for { + c <- client + r <- ZIO.attempt(c.setGroupMembershipAsDefault(userId, groupMembership)) + } yield r + + + def getForums: IO[ZendeskError, List[Forum]] = + for { + c <- client + r <- ZIO.attempt(c.getForums) + } yield r + + + 
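// The wrappers in this file all follow one pattern: obtain the client, run the blocking Java call inside ZIO.attempt, and rely on the implicit conversions in the zendesk package object to turn Throwable into ZendeskError and Java collections into Scala lists. + 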
def getForums(categoryId: Long): IO[ZendeskError, List[Forum]] = + for { + c <- client + r <- ZIO.attempt(c.getForums(categoryId)) + } yield r + + + def getForum(forumId: Long): IO[ZendeskError, Forum] = + for { + c <- client + r <- ZIO.attempt(c.getForum(forumId)) + } yield r + + + def createForum(forum: Forum): IO[ZendeskError, Forum] = + for { + c <- client + r <- ZIO.attempt(c.createForum(forum)) + } yield r + + + def updateForum(forum: Forum): IO[ZendeskError, Forum] = + for { + c <- client + r <- ZIO.attempt(c.updateForum(forum)) + } yield r + + + def deleteForum(forum: Forum): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteForum(forum)) + } yield r + + + def getTopics: IO[ZendeskError, List[Topic]] = + for { + c <- client + r <- ZIO.attempt(c.getTopics) + } yield r + + + def getTopics(forumId: Long): IO[ZendeskError, List[Topic]] = + for { + c <- client + r <- ZIO.attempt(c.getTopics(forumId)) + } yield r + + + def getTopics(topicIds: List[Long]): IO[ZendeskError, List[Topic]] = + topicIds.headOption match { + case None => ZIO.attempt(List()) + case Some(x) => + for { + c <- client + r <- ZIO.attempt(c.getTopics(x, topicIds.drop(1): _*)) + } yield r + } + + + def getTopicsByUser(userId: Long): IO[ZendeskError, List[Topic]] = + for { + c <- client + r <- ZIO.attempt(c.getTopicsByUser(userId)) + } yield r + + + def getTopic(topicId: Long): IO[ZendeskError, Topic] = + for { + c <- client + r <- ZIO.attempt(c.getTopic(topicId)) + } yield r + + + def createTopic(topic: Topic): IO[ZendeskError, Topic] = + for { + c <- client + r <- ZIO.attempt(c.createTopic(topic)) + } yield r + + + def importTopic(topic: Topic): IO[ZendeskError, Topic] = + for { + c <- client + r <- ZIO.attempt(c.importTopic(topic)) + } yield r + + + def updateTopic(topic: Topic): IO[ZendeskError, Topic] = + for { + c <- client + r <- ZIO.attempt(c.updateTopic(topic)) + } yield r + + + def deleteTopic(topic: Topic): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteTopic(topic)) + } yield r + + + def getOrganizationMembershipsByUser(userId: Long): IO[ZendeskError, List[OrganizationMembership]] = + for { + c <- client + r <- ZIO.attempt(c.getOrganizationMembershipByUser(userId)) + } yield r + + + def getGroupOrganization(userId: Long, organizationMembershipId: Long): IO[ZendeskError, OrganizationMembership] = + for { + c <- client + r <- ZIO.attempt(c.getGroupOrganization(userId, organizationMembershipId)) + } yield r + + + def createOrganizationMembership(userId: Long, organizationMembership: OrganizationMembership): IO[ZendeskError, OrganizationMembership] = + for { + c <- client + r <- ZIO.attempt(c.createOrganizationMembership(userId, organizationMembership)) + } yield r + + + def deleteOrganizationMembership(userId: Long, organizationMembership: OrganizationMembership): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteOrganizationMembership(userId, organizationMembership)) + } yield r + + + def deleteOrganizationMembership(userId: Long, organizationMembershipId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteOrganizationMembership(userId, organizationMembershipId)) + } yield r + + + def setOrganizationMembershipAsDefault(userId: Long, organizationMembership: OrganizationMembership): IO[ZendeskError, List[OrganizationMembership]] = + for { + c <- client + r <- ZIO.attempt(c.setOrganizationMembershipAsDefault(userId, organizationMembership)) + } yield r + + + def getSearchResults(query: String): 
IO[ZendeskError, List[SearchResultEntity]] = + for { + c <- client + r <- ZIO.attempt(c.getSearchResults(query)) + } yield r + + + def getSearchResults[T <: SearchResultEntity](cls: Class[T], query: String): IO[ZendeskError, List[T]] = + for { + c <- client + r <- ZIO.attempt(c.getSearchResults[T](cls, query)) + } yield r + + + def getSearchResults[T <: SearchResultEntity](cls: Class[T], query: String, params: String): IO[ZendeskError, List[T]] = + for { + c <- client + r <- ZIO.attempt(c.getSearchResults[T](cls, query, params)) + } yield r + + + def notifyApp(json: String): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.notifyApp(json)) + } yield r + + + def updateInstallation(id: Int, json: String): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.updateInstallation(id, json)) + } yield r + + + def getSatisfactionRatings: IO[ZendeskError, List[SatisfactionRating]] = + for { + c <- client + r <- ZIO.attempt(c.getSatisfactionRatings) + } yield r + + + def getSatisfactionRating(id: Long): IO[ZendeskError, SatisfactionRating] = + for { + c <- client + r <- ZIO.attempt(c.getSatisfactionRating(id)) + } yield r + + + def createSatisfactionRating(ticketId: Long, satisfactionRating: SatisfactionRating): IO[ZendeskError, SatisfactionRating] = + for { + c <- client + r <- ZIO.attempt(c.createSatisfactionRating(ticketId, satisfactionRating)) + } yield r + + + def createSatisfactionRating(ticket: Ticket, satisfactionRating: SatisfactionRating): IO[ZendeskError, SatisfactionRating] = + for { + c <- client + r <- ZIO.attempt(c.createSatisfactionRating(ticket, satisfactionRating)) + } yield r + + + def getHelpCenterLocales: IO[ZendeskError, List[String]] = + for { + c <- client + r <- ZIO.attempt(c.getHelpCenterLocales) + } yield r + + + def getArticles: IO[ZendeskError, List[Article]] = + for { + c <- client + r <- ZIO.attempt(c.getArticles) + } yield r + + + def getArticles(category: Category): IO[ZendeskError, List[Article]] = + for { + c <- client + r <- ZIO.attempt(c.getArticles(category)) + } yield r + + + def getArticlesIncrementally(startTime: Date): IO[ZendeskError, List[Article]] = + for { + c <- client + r <- ZIO.attempt(c.getArticlesIncrementally(startTime)) + } yield r + + + def getArticlesFromPage(page: Int): IO[ZendeskError, List[Article]] = + for { + c <- client + r <- ZIO.attempt(c.getArticlesFromPage(page)) + } yield r + + + def getArticle(articleId: Long): IO[ZendeskError, Article] = + for { + c <- client + r <- ZIO.attempt(c.getArticle(articleId)) + } yield r + + + def getArticleTranslations(articleId: Long): IO[ZendeskError, List[Translation]] = + for { + c <- client + r <- ZIO.attempt(c.getArticleTranslations(articleId)) + } yield r + + + def createArticle(article: Article): IO[ZendeskError, Article] = + for { + c <- client + r <- ZIO.attempt(c.createArticle(article)) + } yield r + + + def updateArticle(article: Article): IO[ZendeskError, Article] = + for { + c <- client + r <- ZIO.attempt(c.updateArticle(article)) + } yield r + + + def createArticleTranslation(articleId: Long, translation: Translation): IO[ZendeskError, Translation] = + for { + c <- client + r <- ZIO.attempt(c.createArticleTranslation(articleId, translation)) + } yield r + + + def updateArticleTranslation(articleId: Long, locale: String, translation: Translation): IO[ZendeskError, Translation] = + for { + c <- client + r <- ZIO.attempt(c.updateArticleTranslation(articleId, locale, translation)) + } yield r + + + def deleteArticle(article: Article): IO[ZendeskError, Unit] = + 
for { + c <- client + r <- ZIO.attempt(c.deleteArticle(article)) + } yield r + + + def deleteArticleAttachment(attachment: ArticleAttachments): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteArticleAttachment(attachment)) + } yield r + + + def deleteArticleAttachment(attachmentId: Long): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteArticleAttachment(attachmentId)) + } yield r + + + def getCategories: IO[ZendeskError, List[Category]] = + for { + c <- client + r <- ZIO.attempt(c.getCategories) + } yield r + + + def getCategory(categoryId: Long): IO[ZendeskError, Category] = + for { + c <- client + r <- ZIO.attempt(c.getCategory(categoryId)) + } yield r + + + def getCategoryTranslations(categoryId: Long): IO[ZendeskError, List[Translation]] = + for { + c <- client + r <- ZIO.attempt(c.getCategoryTranslations(categoryId)) + } yield r + + + def createCategory(category: Category): IO[ZendeskError, Category] = + for { + c <- client + r <- ZIO.attempt(c.createCategory(category)) + } yield r + + + def updateCategory(category: Category): IO[ZendeskError, Category] = + for { + c <- client + r <- ZIO.attempt(c.updateCategory(category)) + } yield r + + + def createCategoryTranslation(categoryId: Long, translation: Translation): IO[ZendeskError, Translation] = + for { + c <- client + r <- ZIO.attempt(c.createCategoryTranslation(categoryId, translation)) + } yield r + + + def updateCategoryTranslation(categoryId: Long, locale: String, translation: Translation): IO[ZendeskError, Translation] = + for { + c <- client + r <- ZIO.attempt(c.updateCategoryTranslation(categoryId, locale, translation)) + } yield r + + + def deleteCategory(category: Category): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteCategory(category)) + } yield r + + + def getSections: IO[ZendeskError, List[Section]] = + for { + c <- client + r <- ZIO.attempt(c.getSections) + } yield r + + + def getSections(category: Category): IO[ZendeskError, List[Section]] = + for { + c <- client + r <- ZIO.attempt(c.getSections(category)) + } yield r + + + def getSection(sectionId: Long): IO[ZendeskError, Section] = + for { + c <- client + r <- ZIO.attempt(c.getSection(sectionId)) + } yield r + + + def getSectionTranslations(sectionId: Long): IO[ZendeskError, List[Translation]] = + for { + c <- client + r <- ZIO.attempt(c.getSectionTranslations(sectionId)) + } yield r + + + def createSection(section: Section): IO[ZendeskError, Section] = + for { + c <- client + r <- ZIO.attempt(c.createSection(section)) + } yield r + + + def updateSection(section: Section): IO[ZendeskError, Section] = + for { + c <- client + r <- ZIO.attempt(c.updateSection(section)) + } yield r + + + def createSectionTranslation(sectionId: Long, translation: Translation): IO[ZendeskError, Translation] = + for { + c <- client + r <- ZIO.attempt(c.createSectionTranslation(sectionId, translation)) + } yield r + + + def updateSectionTranslation(sectionId: Long, locale: String, translation: Translation): IO[ZendeskError, Translation] = + for { + c <- client + r <- ZIO.attempt(c.updateSectionTranslation(sectionId, locale, translation)) + } yield r + + + def deleteSection(section: Section): IO[ZendeskError, Unit] = + for { + c <- client + r <- ZIO.attempt(c.deleteSection(section)) + } yield r + + + def getUserSubscriptions(user: User): IO[ZendeskError, List[Subscription]] = + for { + c <- client + r <- ZIO.attempt(c.getUserSubscriptions(user)) + } yield r + + + def getUserSubscriptions(userId: Long): 
IO[ZendeskError, List[Subscription]] = + for { + c <- client + r <- ZIO.attempt(c.getUserSubscriptions(userId)) + } yield r + + + def getArticleSubscriptions(articleId: Long): IO[ZendeskError, List[Subscription]] = + for { + c <- client + r <- ZIO.attempt(c.getArticleSubscriptions(articleId)) + } yield r + + + def getArticleSubscriptions(articleId: Long, locale: String): IO[ZendeskError, List[Subscription]] = + for { + c <- client + r <- ZIO.attempt(c.getArticleSubscriptions(articleId, locale)) + } yield r + + + def getSectionSubscriptions(sectionId: Long): IO[ZendeskError, List[Subscription]] = + for { + c <- client + r <- ZIO.attempt(c.getSectionSubscriptions(sectionId)) + } yield r + + + def getSectionSubscriptions(sectionId: Long, locale: String): IO[ZendeskError, List[Subscription]] = + for { + c <- client + r <- ZIO.attempt(c.getSectionSubscriptions(sectionId, locale)) + } yield r + + + def getSchedules: IO[ZendeskError, List[Schedule]] = + for { + c <- client + r <- ZIO.attempt(c.getSchedules) + } yield r + + + def getSchedule(schedule: Schedule): IO[ZendeskError, Schedule] = + for { + c <- client + r <- ZIO.attempt(c.getSchedule(schedule: Schedule)) + } yield r + + + def getSchedule(scheduleId: Long): IO[ZendeskError, Schedule] = + for { + c <- client + r <- ZIO.attempt(c.getSchedule(scheduleId)) + } yield r + + + def getHolidaysForSchedule(schedule: Schedule): IO[ZendeskError, List[Holiday]] = + for { + c <- client + r <- ZIO.attempt(c.getHolidaysForSchedule(schedule).asScala.toList) + } yield r + + + def getHolidaysForSchedule(scheduleId: Long): IO[ZendeskError, List[Holiday]] = + for { + c <- client + r <- ZIO.attempt(c.getHolidaysForSchedule(scheduleId)) + } yield r +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/zendesk/Zendesk.scala b/jvm/src/main/scala/com/harana/modules/zendesk/Zendesk.scala new file mode 100644 index 0000000..8f1dbb5 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/zendesk/Zendesk.scala @@ -0,0 +1,560 @@ +package com.harana.modules.zendesk + +import com.harana.modules.zendesk.models.ZendeskError +import org.zendesk.client.v2.model._ +import org.zendesk.client.v2.model.hc._ +import org.zendesk.client.v2.model.schedules.{Holiday, Schedule} +import org.zendesk.client.v2.model.targets._ +import zio.IO +import zio.macros.accessible + +import java.io.File +import java.util.Date + +@accessible +trait Zendesk { + + def getBrands: IO[ZendeskError, List[Brand]] + + def getTicketForm(id: Long): IO[ZendeskError, TicketForm] + + def getTicketForms: IO[ZendeskError, List[TicketForm]] + + def createTicketForm(ticketForm: TicketForm): IO[ZendeskError, TicketForm] + + def importTicket(ticketImport: TicketImport): IO[ZendeskError, Ticket] + + def importTickets(ticketImports: List[TicketImport]): IO[ZendeskError, List[Ticket]] + + def getRecentTickets: IO[ZendeskError, List[Ticket]] + + def getTickets: IO[ZendeskError, List[Ticket]] + + def getTicketsIncrementally(startDate: Date, endDate: Date): IO[ZendeskError, List[Ticket]] + + def getTicketsByExternalId(externalId: String, includeArchived: Boolean): IO[ZendeskError, List[Ticket]] + + def getTicketsFromSearch(searchTerm: String): IO[ZendeskError, List[Ticket]] + + def getTicket(ticketId: Long): IO[ZendeskError, Ticket] + + def getTickets(ticketIds: List[Long]): IO[ZendeskError, List[Ticket]] + + def getTicketIncidents(ticketId: Long): IO[ZendeskError, List[Ticket]] + + def getTicketCollaborators(ticketId: Long): IO[ZendeskError, List[User]] + + def 
getOrganizationTickets(organizationId: Long): IO[ZendeskError, List[Ticket]] + + def getUserRequestedTickets(userId: Long): IO[ZendeskError, List[Ticket]] + + def permanentlyDeleteTicket(ticket: Ticket): IO[ZendeskError, Unit] + + def permanentlyDeleteTicket(ticketId: Long): IO[ZendeskError, Unit] + + def deleteTicket(ticket: Ticket): IO[ZendeskError, Unit] + + def deleteTicket(ticketId: Long): IO[ZendeskError, Unit] + + def createTicket(ticket: Ticket): IO[ZendeskError, Ticket] + + def createTickets(tickets: List[Ticket]): IO[ZendeskError, List[Ticket]] + + def updateTicket(ticket: Ticket): IO[ZendeskError, Ticket] + + def updateTickets(tickets: List[Ticket]): IO[ZendeskError, List[Ticket]] + + def markTicketAsSpam(ticket: Ticket): IO[ZendeskError, Unit] + + def deleteTickets(ticketIds: List[Long]): IO[ZendeskError, Unit] + + def permanentlyDeleteTickets(ticketIds: List[Long]): IO[ZendeskError, Unit] + + def getComplianceDeletionStatuses(userId: Long): IO[ZendeskError, List[ComplianceDeletionStatus]] + + def getUserCCDTickets(userId: Long): IO[ZendeskError, List[Ticket]] + + def getUserRelatedInfo(userId: Long): IO[ZendeskError, UserRelatedInfo] + + def getTicketMetrics: IO[ZendeskError, List[Metric]] + + def getTicketMetricByTicket(id: Long): IO[ZendeskError, Metric] + + def getTicketMetric(id: Long): IO[ZendeskError, Metric] + + def getTicketAudits(ticket: Ticket): IO[ZendeskError, List[Audit]] + + def getTicketAudits(id: Long): IO[ZendeskError, List[Audit]] + + def getTicketAudit(ticket: Ticket, audit: Audit): IO[ZendeskError, Audit] + + def getTicketAudit(ticket: Ticket, auditId: Long): IO[ZendeskError, Audit] + + def getTicketAudit(ticketId: Long, auditId: Long): IO[ZendeskError, Audit] + + def trustTicketAudit(ticket: Ticket, audit: Audit): IO[ZendeskError, Unit] + + def trustTicketAudit(ticket: Ticket, auditId: Long): IO[ZendeskError, Unit] + + def trustTicketAudit(ticketId: Long, auditId: Long): IO[ZendeskError, Unit] + + def makePrivateTicketAudit(ticket: Ticket, audit: Audit): IO[ZendeskError, Unit] + + def makePrivateTicketAudit(ticket: Ticket, auditId: Long): IO[ZendeskError, Unit] + + def makePrivateTicketAudit(ticketId: Long, auditId: Long): IO[ZendeskError, Unit] + + def getTicketFields: IO[ZendeskError, List[Field]] + + def getTicketField(ticketFieldId: Long): IO[ZendeskError, Field] + + def createTicketField(field: Field): IO[ZendeskError, Field] + + def updateTicketField(field: Field): IO[ZendeskError, Field] + + def deleteTicketField(field: Field): IO[ZendeskError, Unit] + + def deleteTicketField(ticketFieldId: Long): IO[ZendeskError, Unit] + + def getSuspendedTickets: IO[ZendeskError, List[SuspendedTicket]] + + def deleteSuspendedTicket(ticket: SuspendedTicket): IO[ZendeskError, Unit] + + def deleteSuspendedTicket(ticketId: Long): IO[ZendeskError, Unit] + + def createUpload(fileName: String, content: Array[Byte]): IO[ZendeskError, Attachment.Upload] + + def createUpload(fileName: String, contentType: String, content: Array[Byte]): IO[ZendeskError, Attachment.Upload] + + def createUpload(token: String, fileName: String, contentType: String, content: Array[Byte]): IO[ZendeskError, Attachment.Upload] + + def deleteUpload(upload: Attachment.Upload): IO[ZendeskError, Unit] + + def deleteUpload(token: String): IO[ZendeskError, Unit] + + def getArticlesFromSearch(searchTerm: String): IO[ZendeskError, List[Article]] + + def getArticlesFromSearch(searchTerm: String, sectionId: Long): IO[ZendeskError, List[Article]] + + def getAttachmentsFromArticle(articleId: Long): 
IO[ZendeskError, List[ArticleAttachments]] + + def associateAttachmentsToArticle(articleId: Long, attachment: List[Attachment]): IO[ZendeskError, Unit] + + def createUploadArticle(articleId: Long, file: File, inline: Boolean): IO[ZendeskError, ArticleAttachments] + + def getAttachment(attachment: Attachment): IO[ZendeskError, Attachment] + + def getAttachment(attachmentId: Long): IO[ZendeskError, Attachment] + + def deleteAttachment(attachmentId: Long): IO[ZendeskError, Unit] + + def getTargets: IO[ZendeskError, List[Target]] + + def getTarget(targetId: Long): IO[ZendeskError, Target] + + def createTarget(target: Target): IO[ZendeskError, Target] + + def deleteTarget(targetId: Long): IO[ZendeskError, Unit] + + def getTriggers: IO[ZendeskError, List[Trigger]] + + def getTrigger(triggerId: Long): IO[ZendeskError, Trigger] + + def createTrigger(trigger: Trigger): IO[ZendeskError, Trigger] + + def updateTrigger(triggerId: Long, trigger: Trigger): IO[ZendeskError, Trigger] + + def deleteTrigger(triggerId: Long): IO[ZendeskError, Unit] + + def getAutomations: IO[ZendeskError, List[Automation]] + + def getAutomation(id: Long): IO[ZendeskError, Automation] + + def createAutomation(automation: Automation): IO[ZendeskError, Automation] + + def updateAutomation(automationId: Long, automation: Automation): IO[ZendeskError, Automation] + + def deleteAutomation(automationId: Long): IO[ZendeskError, Unit] + + def getTwitterMonitors: IO[ZendeskError, List[TwitterMonitor]] + + def getUsers: IO[ZendeskError, List[User]] + + def getUsersByRoles(roles: List[String]): IO[ZendeskError, List[User]] + + def getUsersIncrementally(startTime: Date): IO[ZendeskError, List[User]] + + def getGroupUsers(groupId: Long): IO[ZendeskError, List[User]] + + def getOrganizationUsers(organizationId: Long): IO[ZendeskError, List[User]] + + def getUser(userId: Long): IO[ZendeskError, User] + + def getAuthenticatedUser: IO[ZendeskError, User] + + def getUserFields: IO[ZendeskError, List[UserField]] + + def createUser(user: User): IO[ZendeskError, User] + + def createUsers(users: List[User]): IO[ZendeskError, List[User]] + + def createOrUpdateUser(user: User): IO[ZendeskError, User] + + def updateUser(user: User): IO[ZendeskError, User] + + def deleteUser(user: User): IO[ZendeskError, Unit] + + def deleteUser(userId: Long): IO[ZendeskError, Unit] + + def permanentlyDeleteUser(user: User): IO[ZendeskError, User] + + def permanentlyDeleteUser(userId: Long): IO[ZendeskError, User] + + def suspendUser(userId: Long): IO[ZendeskError, User] + + def unsuspendUser(userId: Long): IO[ZendeskError, User] + + def lookupUserByEmail(email: String): IO[ZendeskError, List[User]] + + def lookupUserByExternalId(externalId: String): IO[ZendeskError, List[User]] + + def getCurrentUser: IO[ZendeskError, User] + + def resetUserPassword(user: User, password: String): IO[ZendeskError, Unit] + + def resetUserPassword(userId: Long, password: String): IO[ZendeskError, Unit] + + def changeUserPassword(user: User, oldPassword: String, newPassword: String): IO[ZendeskError, Unit] + + def getUserIdentities(user: User): IO[ZendeskError, List[Identity]] + + def getUserIdentities(userId: Long): IO[ZendeskError, List[Identity]] + + def getUserIdentity(user: User, identity: Identity): IO[ZendeskError, Identity] + + def getUserIdentity(user: User, identityId: Long): IO[ZendeskError, Identity] + + def getUserIdentity(userId: Long, identityId: Long): IO[ZendeskError, Identity] + + def setUserPrimaryIdentity(user: User, identity: Identity): IO[ZendeskError, 
List[Identity]] + + def setUserPrimaryIdentity(user: User, identityId: Long): IO[ZendeskError, List[Identity]] + + def setUserPrimaryIdentity(userId: Long, identityId: Long): IO[ZendeskError, List[Identity]] + + def verifyUserIdentity(user: User, identity: Identity): IO[ZendeskError, Identity] + + def verifyUserIdentity(user: User, identityId: Long): IO[ZendeskError, Identity] + + def verifyUserIdentity(userId: Long, identityId: Long): IO[ZendeskError, Identity] + + def requestVerifyUserIdentity(user: User, identity: Identity): IO[ZendeskError, Identity] + + def requestVerifyUserIdentity(user: User, identityId: Long): IO[ZendeskError, Identity] + + def requestVerifyUserIdentity(userId: Long, identityId: Long): IO[ZendeskError, Identity] + + def updateUserIdentity(userId: Long, identity: Identity): IO[ZendeskError, Identity] + + def updateUserIdentity(user: User, identity: Identity): IO[ZendeskError, Identity] + + def deleteUserIdentity(user: User, identity: Identity): IO[ZendeskError, Unit] + + def deleteUserIdentity(user: User, identityId: Long): IO[ZendeskError, Unit] + + def deleteUserIdentity(userId: Long, identityId: Long): IO[ZendeskError, Unit] + + def createUserIdentity(userId: Long, identity: Identity): IO[ZendeskError, Identity] + + def createUserIdentity(user: User, identity: Identity): IO[ZendeskError, Identity] + + def getCustomAgentRoles: IO[ZendeskError, List[AgentRole]] + + def getRequests: IO[ZendeskError, List[Request]] + + def getOpenRequests: IO[ZendeskError, List[Request]] + + def getSolvedRequests: IO[ZendeskError, List[Request]] + + def getCCRequests: IO[ZendeskError, List[Request]] + + def getUserRequests(user: User): IO[ZendeskError, List[Request]] + + def getUserRequests(id: Long): IO[ZendeskError, List[Request]] + + def getRequest(id: Long): IO[ZendeskError, Request] + + def createRequest(request: Request): IO[ZendeskError, Request] + + def updateRequest(request: Request): IO[ZendeskError, Request] + + def getRequestComments(request: Request): IO[ZendeskError, List[Comment]] + + def getRequestComments(id: Long): IO[ZendeskError, List[Comment]] + + def getTicketComments(id: Long): IO[ZendeskError, List[Comment]] + + def getRequestComment(request: Request, comment: Comment): IO[ZendeskError, Comment] + + def getRequestComment(request: Request, commentId: Long): IO[ZendeskError, Comment] + + def getRequestComment(requestId: Long, commentId: Long): IO[ZendeskError, Comment] + + def createComment(ticketId: Long, comment: Comment): IO[ZendeskError, Ticket] + + def createTicketFromTweet(tweetId: Long, monitorId: Long): IO[ZendeskError, Ticket] + + def getOrganizations: IO[ZendeskError, List[Organization]] + + def getOrganizationsIncrementally(startTime: Date): IO[ZendeskError, List[Organization]] + + def getOrganizationFields: IO[ZendeskError, List[OrganizationField]] + + def getAutoCompleteOrganizations(name: String): IO[ZendeskError, List[Organization]] + + def getOrganization(id: Long): IO[ZendeskError, Organization] + + def createOrganization(organization: Organization): IO[ZendeskError, Organization] + + def createOrganizations(organizations: List[Organization]): IO[ZendeskError, List[Organization]] + + def updateOrganization(organization: Organization): IO[ZendeskError, Organization] + + def deleteOrganization(organization: Organization): IO[ZendeskError, Unit] + + def deleteOrganization(id: Long): IO[ZendeskError, Unit] + + def lookupOrganizationsByExternalId(externalId: String): IO[ZendeskError, List[Organization]] + + def getOrganizationMemberships: 
IO[ZendeskError, List[OrganizationMembership]] + + def getOrganizationMembershipsForOrg(organizationId: Long): IO[ZendeskError, List[OrganizationMembership]] + + def getOrganizationMembershipsForUser(userId: Long): IO[ZendeskError, List[OrganizationMembership]] + + def getOrganizationMembershipForUser(userId: Long, id: Long): IO[ZendeskError, OrganizationMembership] + + def getOrganizationMembership(id: Long): IO[ZendeskError, OrganizationMembership] + + def createOrganizationMembership(organizationMembership: OrganizationMembership): IO[ZendeskError, OrganizationMembership] + + def deleteOrganizationMembership(id: Long): IO[ZendeskError, Unit] + + def getGroups: IO[ZendeskError, List[Group]] + + def getAssignableGroups: IO[ZendeskError, List[Group]] + + def getGroup(groupId: Long): IO[ZendeskError, Group] + + def createGroup(group: Group): IO[ZendeskError, Group] + + def updateGroup(group: Group): IO[ZendeskError, Group] + + def deleteGroup(group: Group): IO[ZendeskError, Unit] + + def deleteGroup(groupId: Long): IO[ZendeskError, Unit] + + def getMacros: IO[ZendeskError, List[Macro]] + + def getMacro(macroId: Long): IO[ZendeskError, Macro] + + def createMacro(`macro`: Macro): IO[ZendeskError, Macro] + + def updateMacro(macroId: Long, `macro`: Macro): IO[ZendeskError, Macro] + + def macrosShowChangesToTicket(macroId: Long): IO[ZendeskError, Ticket] + + def macrosShowTicketAfterChanges(ticketId: Long, macroId: Long): IO[ZendeskError, Ticket] + + def addTagToTicket(ticketId: Long, tags: List[String]): IO[ZendeskError, List[String]] + + def addTagToTopic(topicId: Long, tags: List[String]): IO[ZendeskError, List[String]] + + def addTagToOrganization(organizationId: Long, tags: List[String]): IO[ZendeskError, List[String]] + + def setTagOnTicket(tagId: Long, tags: List[String]): IO[ZendeskError, List[String]] + + def setTagOnTopic(topicId: Long, tags: List[String]): IO[ZendeskError, List[String]] + + def setTagOnOrganisation(organizationId: Long, tags: List[String]): IO[ZendeskError, List[String]] + + def removeTagFromTicket(ticketId: Long, tags: List[String]): IO[ZendeskError, List[String]] + + def removeTagFromTopic(topicId: Long, tags: List[String]): IO[ZendeskError, List[String]] + + def removeTagFromOrganisation(organizationId: Long, tags: List[String]): IO[ZendeskError, List[String]] + + def getIncrementalTicketsResult(unixEpochTime: Long): IO[ZendeskError, Map[_, _]] + + def getGroupMemberships: IO[ZendeskError, List[GroupMembership]] + + def getGroupMembershipByUser(userId: Long): IO[ZendeskError, List[GroupMembership]] + + def getGroupMemberships(groupId: Long): IO[ZendeskError, List[GroupMembership]] + + def getAssignableGroupMemberships: IO[ZendeskError, List[GroupMembership]] + + def getAssignableGroupMemberships(groupId: Long): IO[ZendeskError, List[GroupMembership]] + + def getGroupMembership(groupMembershipId: Long): IO[ZendeskError, GroupMembership] + + def getGroupMembership(userId: Long, groupMembershipId: Long): IO[ZendeskError, GroupMembership] + + def createGroupMembership(groupMembership: GroupMembership): IO[ZendeskError, GroupMembership] + + def createGroupMembership(userId: Long, groupMembership: GroupMembership): IO[ZendeskError, GroupMembership] + + def deleteGroupMembership(groupMembership: GroupMembership): IO[ZendeskError, Unit] + + def deleteGroupMembership(id: Long): IO[ZendeskError, Unit] + + def deleteGroupMembership(userId: Long, groupMembership: GroupMembership): IO[ZendeskError, Unit] + + def deleteGroupMembership(userId: Long, groupMembershipId: Long): 
IO[ZendeskError, Unit] + + def setGroupMembershipAsDefault(userId: Long, groupMembership: GroupMembership): IO[ZendeskError, List[GroupMembership]] + + def getForums: IO[ZendeskError, List[Forum]] + + def getForums(categoryId: Long): IO[ZendeskError, List[Forum]] + + def getForum(forumId: Long): IO[ZendeskError, Forum] + + def createForum(forum: Forum): IO[ZendeskError, Forum] + + def updateForum(forum: Forum): IO[ZendeskError, Forum] + + def deleteForum(forum: Forum): IO[ZendeskError, Unit] + + def getTopics: IO[ZendeskError, List[Topic]] + + def getTopics(forumId: Long): IO[ZendeskError, List[Topic]] + + def getTopics(topicIds: List[Long]): IO[ZendeskError, List[Topic]] + + def getTopicsByUser(userId: Long): IO[ZendeskError, List[Topic]] + + def getTopic(topicId: Long): IO[ZendeskError, Topic] + + def createTopic(topic: Topic): IO[ZendeskError, Topic] + + def importTopic(topic: Topic): IO[ZendeskError, Topic] + + def updateTopic(topic: Topic): IO[ZendeskError, Topic] + + def deleteTopic(topic: Topic): IO[ZendeskError, Unit] + + def getOrganizationMembershipsByUser(userId: Long): IO[ZendeskError, List[OrganizationMembership]] + + def getGroupOrganization(userId: Long, organizationMembershipId: Long): IO[ZendeskError, OrganizationMembership] + + def createOrganizationMembership(userId: Long, organizationMembership: OrganizationMembership): IO[ZendeskError, OrganizationMembership] + + def deleteOrganizationMembership(userId: Long, organizationMembership: OrganizationMembership): IO[ZendeskError, Unit] + + def deleteOrganizationMembership(userId: Long, organizationMembershipId: Long): IO[ZendeskError, Unit] + + def setOrganizationMembershipAsDefault(userId: Long, organizationMembership: OrganizationMembership): IO[ZendeskError, List[OrganizationMembership]] + + def getSearchResults(query: String): IO[ZendeskError, List[SearchResultEntity]] + + def getSearchResults[T <: SearchResultEntity](cls: Class[T], query: String): IO[ZendeskError, List[T]] + + def getSearchResults[T <: SearchResultEntity](cls: Class[T], query: String, params: String): IO[ZendeskError, List[T]] + + def notifyApp(json: String): IO[ZendeskError, Unit] + + def updateInstallation(id: Int, json: String): IO[ZendeskError, Unit] + + def getSatisfactionRatings: IO[ZendeskError, List[SatisfactionRating]] + + def getSatisfactionRating(id: Long): IO[ZendeskError, SatisfactionRating] + + def createSatisfactionRating(ticketId: Long, satisfactionRating: SatisfactionRating): IO[ZendeskError, SatisfactionRating] + + def createSatisfactionRating(ticket: Ticket, satisfactionRating: SatisfactionRating): IO[ZendeskError, SatisfactionRating] + + def getHelpCenterLocales: IO[ZendeskError, List[String]] + + def getArticles: IO[ZendeskError, List[Article]] + + def getArticles(category: Category): IO[ZendeskError, List[Article]] + + def getArticlesIncrementally(startTime: Date): IO[ZendeskError, List[Article]] + + def getArticlesFromPage(page: Int): IO[ZendeskError, List[Article]] + + def getArticle(articleId: Long): IO[ZendeskError, Article] + + def getArticleTranslations(articleId: Long): IO[ZendeskError, List[Translation]] + + def createArticle(article: Article): IO[ZendeskError, Article] + + def updateArticle(article: Article): IO[ZendeskError, Article] + + def createArticleTranslation(articleId: Long, translation: Translation): IO[ZendeskError, Translation] + + def updateArticleTranslation(articleId: Long, locale: String, translation: Translation): IO[ZendeskError, Translation] + + def deleteArticle(article: Article): IO[ZendeskError, Unit] 
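+ // Usage sketch: the @accessible annotation on this trait generates companion-object accessors, so call sites can write e.g. Zendesk.getArticles as ZIO[Zendesk, ZendeskError, List[Article]], assuming the live implementation is provided as a ZLayer elsewhere.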
+ + def deleteArticleAttachment(attachment: ArticleAttachments): IO[ZendeskError, Unit] + + def deleteArticleAttachment(attachmentId: Long): IO[ZendeskError, Unit] + + def getCategories: IO[ZendeskError, List[Category]] + + def getCategory(categoryId: Long): IO[ZendeskError, Category] + + def getCategoryTranslations(categoryId: Long): IO[ZendeskError, List[Translation]] + + def createCategory(category: Category): IO[ZendeskError, Category] + + def updateCategory(category: Category): IO[ZendeskError, Category] + + def createCategoryTranslation(categoryId: Long, translation: Translation): IO[ZendeskError, Translation] + + def updateCategoryTranslation(categoryId: Long, locale: String, translation: Translation): IO[ZendeskError, Translation] + + def deleteCategory(category: Category): IO[ZendeskError, Unit] + + def getSections: IO[ZendeskError, List[Section]] + + def getSections(category: Category): IO[ZendeskError, List[Section]] + + def getSection(sectionId: Long): IO[ZendeskError, Section] + + def getSectionTranslations(sectionId: Long): IO[ZendeskError, List[Translation]] + + def createSection(section: Section): IO[ZendeskError, Section] + + def updateSection(section: Section): IO[ZendeskError, Section] + + def createSectionTranslation(sectionId: Long, translation: Translation): IO[ZendeskError, Translation] + + def updateSectionTranslation(sectionId: Long, locale: String, translation: Translation): IO[ZendeskError, Translation] + + def deleteSection(section: Section): IO[ZendeskError, Unit] + + def getUserSubscriptions(user: User): IO[ZendeskError, List[Subscription]] + + def getUserSubscriptions(userId: Long): IO[ZendeskError, List[Subscription]] + + def getArticleSubscriptions(articleId: Long): IO[ZendeskError, List[Subscription]] + + def getArticleSubscriptions(articleId: Long, locale: String): IO[ZendeskError, List[Subscription]] + + def getSectionSubscriptions(sectionId: Long): IO[ZendeskError, List[Subscription]] + + def getSectionSubscriptions(sectionId: Long, locale: String): IO[ZendeskError, List[Subscription]] + + def getSchedules: IO[ZendeskError, List[Schedule]] + + def getSchedule(schedule: Schedule): IO[ZendeskError, Schedule] + + def getSchedule(scheduleId: Long): IO[ZendeskError, Schedule] + + def getHolidaysForSchedule(schedule: Schedule): IO[ZendeskError, List[Holiday]] + + def getHolidaysForSchedule(scheduleId: Long): IO[ZendeskError, List[Holiday]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/zendesk/models/ZendeskError.scala b/jvm/src/main/scala/com/harana/modules/zendesk/models/ZendeskError.scala new file mode 100644 index 0000000..a6ed2a2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/zendesk/models/ZendeskError.scala @@ -0,0 +1,8 @@ +package com.harana.modules.zendesk.models + +sealed trait ZendeskError +object ZendeskError { + case class RateLimit(e: Exception) extends ZendeskError + case class Response(e: Exception) extends ZendeskError + case class Unknown(t: Throwable) extends ZendeskError +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/zendesk/package.scala b/jvm/src/main/scala/com/harana/modules/zendesk/package.scala new file mode 100644 index 0000000..bcaca9d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/zendesk/package.scala @@ -0,0 +1,42 @@ +package com.harana.modules + +import com.harana.modules.zendesk.models.ZendeskError +import org.asynchttpclient.ListenableFuture +import org.zendesk.client.v2.model.JobStatus +import 
org.zendesk.client.v2.{ZendeskResponseException, ZendeskResponseRateLimitException} + import zio.{IO, Task, ZIO} + + import scala.compat.java8.FutureConverters._ + import scala.jdk.CollectionConverters._ + + package object zendesk { + + // Implicit adapters used by LiveZendesk: map Throwable to ZendeskError and convert Java collections and futures into Scala/ZIO types. + implicit def iterableTaskToIO[A](fn: zio.Task[java.lang.Iterable[A]]): IO[ZendeskError, List[A]] = + fn.mapBoth(handleException, _.asScala.toList) + + implicit def taskToIO[A](fn: zio.Task[A]): IO[ZendeskError, A] = + fn.mapError(handleException) + + implicit def futureToIO[A](fn: Task[ListenableFuture[A]]): IO[ZendeskError, A] = + fn.flatMap { f => + ZIO.fromFuture { implicit ec => + f.toCompletableFuture.toScala + } + }.mapError(handleException) + + // Bulk (async) calls complete with a JobStatus; assumes JobStatus#getResults carries the entities produced by the job. + implicit def listenableFutureToIO[A](fn: Task[ListenableFuture[JobStatus]]): IO[ZendeskError, List[A]] = + futureToIO(fn).map(_.getResults.asScala.toList.map(_.asInstanceOf[A])) + + implicit def javaListToScalaList[A](list: java.util.List[A]): List[A] = + list.asScala.toList + + implicit def scalaListToJavaIterable[A](list: List[A]): java.util.List[A] = + list.asJava + + def handleException(t: Throwable): ZendeskError = + t match { + case e: ZendeskResponseRateLimitException => ZendeskError.RateLimit(e) + case e: ZendeskResponseException => ZendeskError.Response(e) + case t: Throwable => ZendeskError.Unknown(t) + } + } \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/zookeeper/LiveZookeeper.scala b/jvm/src/main/scala/com/harana/modules/zookeeper/LiveZookeeper.scala new file mode 100644 index 0000000..380b434 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/zookeeper/LiveZookeeper.scala @@ -0,0 +1,47 @@ +package com.harana.modules.zookeeper + +import com.harana.modules.core.config.Config + import com.harana.modules.core.logger.Logger + import com.harana.modules.core.micrometer.Micrometer + import com.harana.modules.docker.Docker + import com.harana.modules.zookeeper.LiveZookeeper.image + import zio.{Task, ZIO, ZLayer} + + object LiveZookeeper { + private val image = "zookeeper:3.6" + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + docker <- ZIO.service[Docker] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveZookeeper(config, docker, logger, micrometer) + } + } + + case class LiveZookeeper(config: Config, docker: Docker, logger: Logger, micrometer: Micrometer) extends Zookeeper { + + def localStart: Task[Unit] = + for { + _ <- logger.info("Starting Zookeeper") + running <- docker.containerRunning("zookeeper") + _ <- logger.debug("Existing Zookeeper container not found. 
Starting a new one.").when(!running) + _ <- start.when(!running) + } yield () + + def localStop: Task[Unit] = + for { + _ <- logger.info("Stopping Zookeeper") + containers <- docker.listContainers(nameFilter = List("zookeeper")) + _ <- ZIO.foreach(containers.map(_.getId))(id => docker.stopContainer(id)) + } yield () + + private def start = + for { + _ <- docker.pullImage(image) + id <- docker.createContainer("zookeeper", image, exposedPorts = Map(2181 -> 2181)) + _ <- docker.startContainer(id) + } yield () + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/zookeeper/Zookeeper.scala b/jvm/src/main/scala/com/harana/modules/zookeeper/Zookeeper.scala new file mode 100644 index 0000000..bf705fe --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/zookeeper/Zookeeper.scala @@ -0,0 +1,13 @@ +package com.harana.modules.zookeeper + +import zio.Task +import zio.macros.accessible + +@accessible +trait Zookeeper { + + def localStart: Task[Unit] + + def localStop: Task[Unit] + +} \ No newline at end of file diff --git a/jvm/src/test/resources/SampleProject.yml b/jvm/src/test/resources/SampleProject.yml new file mode 100644 index 0000000..f3c7979 --- /dev/null +++ b/jvm/src/test/resources/SampleProject.yml @@ -0,0 +1,106 @@ +title: Project One +description: Sample project to test the deployment functionality. +author: + name: Michael Thomson + email: michaelthomson@nbnco.com.au + +parameters: + - name: glue_db_name + value: assn_${env}_telemetry + - name: glue_table_name + value: cuperf_line_list + - name: home_path + value: s3://assn-csa-${env}-telemetry-data-lake/PROD/RAW/CuperfLineList + - name: headers_size + value: 14 + +repositories: + - name: project_one + git: + url: https://git.nbnco.net.au/analytics/one.git + username: michaelthomson + password: password + branch: master + tag: 0.4 + +containers: + - name: database + docker: + image: mysql:mysql + command: [echo, "${parameters.map}"] + parameters: + - name: glue_db_name + default: 4g + + - name: webapp + scala: + repository: project_one + path: projects/one + sbt: + file: build.sbt + memory: 4g + ports: + - name: webapp + internal: 5000 + external: 5000 + + - name: webapp2 + buildpack: + repository: project_one + path: projects/one + expose: + port: 5000 + + - name: monitoring + docker: + repository: project_one + path: projects/one + dependencies: + - database + - webapp + strategy: recreate + expose: + port: 5000 + +pipelines: + - name: Deploy + start: + action: webapp + triggers: + - name: wait-for-sns + sns: + topic: topic-arn + port: 9200 + url: http;//myfakeure + - name: slack + slack: + channel: topic-arn + actions: + - name: webapp + parameters: + - name: message + value: "hi" + container: + name: webapp + version: 6 + resources: + cpu: 4 + memory: 4g + dependencies: ["database", "monitoring"] + +daemons: + - name: Deploy + replicas: 4 + containers: + - name: webapp + version: 6 + strategy: + canary: + intial: 4 + +notifications: + - name: slack + slack: + channel: project-one-{{env}} + events: + - publish diff --git a/macros/src/main/scala/com/harana/macros/mongo/Macros.scala b/macros/src/main/scala/com/harana/macros/mongo/Macros.scala new file mode 100755 index 0000000..ec65ba2 --- /dev/null +++ b/macros/src/main/scala/com/harana/macros/mongo/Macros.scala @@ -0,0 +1,574 @@ +package com.harana.macros.mongo + +import java.time.Instant +import java.util.UUID +import java.util.concurrent.ConcurrentHashMap + +import org.bson._ +import org.bson.codecs.configuration._ +import org.bson.codecs.{LongCodec 
=> BsonLongCodec, ObjectIdCodec => BsonObjectIdCodec, _} +import org.mongodb.scala.bson.ObjectId + +import scala.collection.concurrent +import scala.language.experimental.macros +import scala.reflect.ClassTag +import scala.reflect.macros.whitebox +import scala.reflect.runtime.universe._ +import scala.util.Try + +class DoubleCodec extends Codec[Double] { + def getEncoderClass: Class[Double] = classOf[Double] + + val inner = new BsonDoubleCodec + + def encode(writer: BsonWriter, it: Double, encoderContext: EncoderContext) = { + inner.encode(writer, new BsonDouble(it), encoderContext) + } + + def decode(reader: BsonReader, decoderContext: DecoderContext): Double = { + inner.decode(reader, decoderContext).getValue + } +} + +class IntCodec extends Codec[Int] { + def getEncoderClass: Class[Int] = classOf[Int] + + val inner = new IntegerCodec + + def encode(writer: BsonWriter, it: Int, encoderContext: EncoderContext) = { + inner.encode(writer, it, encoderContext) + } + + def decode(reader: BsonReader, decoderContext: DecoderContext): Int = { + inner.decode(reader, decoderContext) + } +} + +class LongCodec extends Codec[Long] { + def getEncoderClass: Class[Long] = classOf[Long] + + val inner = new BsonLongCodec + + def encode(writer: BsonWriter, it: Long, encoderContext: EncoderContext) = { + inner.encode(writer, it, encoderContext) + } + + def decode(reader: BsonReader, decoderContext: DecoderContext): Long = { + inner.decode(reader, decoderContext) + } +} + +class BooleanCodec extends Codec[Boolean] { + def getEncoderClass: Class[Boolean] = classOf[Boolean] + + val inner = new org.bson.codecs.BooleanCodec + + def encode(writer: BsonWriter, it: Boolean, encoderContext: EncoderContext) = { + inner.encode(writer, it, encoderContext) + } + + def decode(reader: BsonReader, decoderContext: DecoderContext): Boolean = { + inner.decode(reader, decoderContext) + } +} + +class InstantCodec extends Codec[Instant] { + def getEncoderClass: Class[Instant] = classOf[Instant] + + val inner = new BsonDateTimeCodec + + def encode(writer: BsonWriter, it: Instant, encoderContext: EncoderContext) = { + inner.encode(writer, new BsonDateTime(it.toEpochMilli), encoderContext) + } + + def decode(reader: BsonReader, decoderContext: DecoderContext): Instant = { + Instant.ofEpochMilli(inner.decode(reader, decoderContext).getValue) + } +} + +class ObjectIdCodec extends Codec[ObjectId] { + def getEncoderClass: Class[ObjectId] = classOf[ObjectId] + + val inner = new BsonObjectIdCodec + + def encode(writer: BsonWriter, it: ObjectId, encoderContext: EncoderContext) = { + inner.encode(writer, it, encoderContext) + } + + def decode(reader: BsonReader, decoderContext: DecoderContext): ObjectId = { + inner.decode(reader, decoderContext) + } +} + +class UUIDCodec extends Codec[UUID] { + def getEncoderClass: Class[UUID] = classOf[UUID] + + val inner = new UuidCodec + + def encode(writer: BsonWriter, it: UUID, encoderContext: EncoderContext) = { + inner.encode(writer, it, encoderContext) + } + + def decode(reader: BsonReader, decoderContext: DecoderContext): UUID = { + inner.decode(reader, decoderContext) + } +} + +class EnumerationCodec[T](implicit ct: ClassTag[T], tt: TypeTag[T]) extends Codec[T] { + def getEncoderClass: Class[T] = ct.runtimeClass.asInstanceOf[Class[T]] + + val inner = new StringCodec + + def encode(writer: BsonWriter, it: T, encoderContext: EncoderContext) = { + inner.encode(writer, it.toString, encoderContext) + } + + def decode(reader: BsonReader, decoderContext: DecoderContext): T = { + 
reflectEnum[T](inner.decode(reader, decoderContext)) + } +} + +trait IterableCodec[T] { + def encodeIterable(inner: Codec[T], writer: BsonWriter, it: Iterable[T], encoderContext: EncoderContext) = { + writer.writeStartArray() + it.foreach(inner.encode(writer, _, encoderContext)) + writer.writeEndArray() + } + + def decodeIterable(inner: Codec[T], reader: BsonReader, decoderContext: DecoderContext): Iterable[T] = { + reader.readStartArray() + val buffer = scala.collection.mutable.Buffer[T]() + while (reader.readBsonType != BsonType.END_OF_DOCUMENT) { + buffer.append(inner.decode(reader, decoderContext)) + } + reader.readEndArray() + buffer + } +} + +class SeqCodec[T](inner: Codec[T]) extends Codec[Seq[T]] with IterableCodec[T] { + def getEncoderClass: Class[Seq[T]] = classOf[Seq[T]] + + def encode(writer: BsonWriter, it: Seq[T], encoderContext: EncoderContext): Unit = encodeIterable(inner, writer, it, encoderContext) + + def decode(reader: BsonReader, decoderContext: DecoderContext): Seq[T] = decodeIterable(inner, reader, decoderContext).toSeq +} + +class SetCodec[T](inner: Codec[T]) extends Codec[Set[T]] with IterableCodec[T] { + def getEncoderClass: Class[Set[T]] = classOf[Set[T]] + + def encode(writer: BsonWriter, it: Set[T], encoderContext: EncoderContext): Unit = encodeIterable(inner, writer, it, encoderContext) + + def decode(reader: BsonReader, decoderContext: DecoderContext): Set[T] = decodeIterable(inner, reader, decoderContext).toSet +} + +class ListCodec[T](inner: Codec[T]) extends Codec[List[T]] with IterableCodec[T] { + def getEncoderClass: Class[List[T]] = classOf[List[T]] + + def encode(writer: BsonWriter, it: List[T], encoderContext: EncoderContext): Unit = encodeIterable(inner, writer, it, encoderContext) + + def decode(reader: BsonReader, decoderContext: DecoderContext): List[T] = decodeIterable(inner, reader, decoderContext).toList +} + +class MapCodec[A, B](inner: Codec[Any])(implicit ct: ClassTag[A], tt: TypeTag[A]) extends Codec[Map[A, B]] { + + private val StringClass = classOf[String] + private val DoubleClass = classOf[Double] + private val IntClass = classOf[Int] + private val LongClass = classOf[Long] + private val BooleanClass = classOf[Boolean] + private val InstantClass = classOf[Instant] + private val ObjectIdClass = classOf[ObjectId] + private val UUIDClass = classOf[UUID] + private val EnumClass = classOf[Enumeration#Value] + + def getEncoderClass: Class[Map[A, B]] = classOf[Map[A, B]] + + def encode(writer: BsonWriter, it: Map[A, B], encoderContext: EncoderContext) = { + writer.writeStartDocument() + it.foreach { case (k, v) => + val str = k match { + case s: String => s + case d: Double => d.toString + case i: Int => i.toString + case l: Long => l.toString + case b: Boolean => b.toString + case it: Instant => it.toEpochMilli.toString + case o: ObjectId => o.toHexString + case u: UUID => u.toString + case e: scala.Enumeration#Value => e.toString + case _ => throw new RuntimeException("Type not supported as Map key.") + } + writer.writeName(str) + inner.encode(writer, v, encoderContext) + } + writer.writeEndDocument() + } + + def decode(reader: BsonReader, decoderContext: DecoderContext): Map[A, B] = { + reader.readStartDocument() + val buffer = scala.collection.mutable.Buffer[(A, B)]() + while (reader.readBsonType != BsonType.END_OF_DOCUMENT) { + val name = reader.readName + val obj = ct.runtimeClass match { + case StringClass => name + case DoubleClass => name.toDouble + case IntClass => name.toInt + case LongClass => name.toLong + case BooleanClass => 
name.toBoolean + case InstantClass => Instant.ofEpochMilli(name.toLong) + case ObjectIdClass => new ObjectId(name) + case UUIDClass => UUID.fromString(name) + case EnumClass => reflectEnum[A](name) + case _ => throw new RuntimeException("Type not supported as Map key.") + } + buffer.append((obj.asInstanceOf[A], inner.decode(reader, decoderContext).asInstanceOf[B])) + } + reader.readEndDocument() + buffer.toMap + } +} + +class EitherCodec[A, B](innerA: Codec[A], innerB: Codec[B]) extends Codec[Either[A, B]] { + def getEncoderClass: Class[Either[A, B]] = classOf[Either[A, B]] + + def encode(writer: BsonWriter, it: Either[A, B], encoderContext: EncoderContext) = { + writer.writeStartDocument() + it match { + case Left(x) => + writer.writeName("left") + innerA.encode(writer, x, encoderContext) + case Right(x) => + writer.writeName("right") + innerB.encode(writer, x, encoderContext) + } + writer.writeEndDocument() + } + + def decode(reader: BsonReader, decoderContext: DecoderContext): Either[A, B] = { + reader.readStartDocument() + val result = reader.readName match { + case "left" => Left(innerA.decode(reader, decoderContext)) + case "right" => Right(innerB.decode(reader, decoderContext)) + } + reader.readEndDocument() + result + } +} + +class ExistentialCodec[T](v: T) extends Codec[T] { + override def encode(writer: BsonWriter, value: T, encoderContext: EncoderContext) = {} + + override def getEncoderClass: Class[T] = v.getClass.asInstanceOf[Class[T]] + + override def decode(reader: BsonReader, decoderContext: DecoderContext): T = v +} + + +class DynamicCodecRegistry extends CodecRegistry { + + import collection.JavaConverters._ + + def get[T](it: Class[T]): Codec[T] = Try { + providedCodecs.get(it) + }.toOption.orElse { + Some(registered(it).asInstanceOf[Codec[T]]) + }.get + + def get[T](it: Class[T], registry: CodecRegistry): Codec[T] = Try { + providedCodecs.get(it) + }.toOption.orElse { + Some(registered(it).asInstanceOf[Codec[T]]) + }.get + + def register[T](codec: Codec[T]) = { + registered.put(codec.getEncoderClass, codec) + } + + def registerFor[T, V <: T](codec: Codec[T], v: Class[V]) = { + registered.put(v, codec) + } + + val providedCodecs: CodecRegistry = + CodecRegistries.fromRegistries( + CodecRegistries.fromCodecs(new DoubleCodec, new IntCodec, new LongCodec, new InstantCodec, new BooleanCodec, new UUIDCodec, new ObjectIdCodec) + ) + + val registered: concurrent.Map[Class[_], Codec[_]] = + new ConcurrentHashMap[Class[_], Codec[_]]().asScala +} + +object CodecGen { + + def apply[T](registry: DynamicCodecRegistry): Codec[T] = macro registerCodec[T] + + def gen[T](registry: DynamicCodecRegistry): Codec[T] = macro materializeCodec[T] + + def forSealedImpl[T: c.WeakTypeTag](c: whitebox.Context)(registry: c.Expr[DynamicCodecRegistry]): c.Expr[Codec[T]] = { + import c.universe._ + val tpe = c.weakTypeOf[T] + val ctpe = tpe.typeSymbol.asClass + require(ctpe.isSealed) + require(ctpe.knownDirectSubclasses.nonEmpty) + val (objects, caseClasses) = { + val (os, cs) = ctpe.knownDirectSubclasses.partition(_.isModuleClass) + (os.map(_.asClass.module.asModule), cs.map(_.asClass)) + } + + def nameOf(s: Symbol) = s.fullName.split("\\.").last + + val objCodecs = objects.map(o => + q"""${nameOf(o)} -> { + new com.harana.macros.mongo.ExistentialCodec[$tpe]($o).asInstanceOf[Codec[$tpe]] + }""" + ) + + val ccCodecs = caseClasses.map(cc => + q"${nameOf(cc)} -> com.harana.macros.mongo.CodecGen.gen[$cc]($registry).asInstanceOf[Codec[$tpe]]" + ) + + val e = c.Expr[Codec[T]] { + q"""{ + val codec = new 
org.bson.codecs.Codec[$tpe] { + import org.bson._ + import org.bson.codecs._ + val codecs: Map[String, Codec[$tpe]] = List(..$objCodecs,..$ccCodecs).toMap + + private def rtNameOf(v: $tpe): String = { + ${ + val cases = ctpe.knownDirectSubclasses.map { + case c: ClassSymbol if c.isModuleClass => cq"x if x == ${c.module} => ${nameOf(c)}" + case c: ClassSymbol => cq" _ : ${c.name} => ${nameOf(c)}" + } + q""" + v match { + case ..$cases + } + """ + } + + } + private val sentinelType = "__type" + private val payloadName = "payload" + override def getEncoderClass: Class[$tpe] = classOf[$tpe] + + override def encode(writer: BsonWriter, value: $tpe, encoderContext: EncoderContext) = { + val typeName = rtNameOf(value) + writer.writeStartDocument + writer.writeString(sentinelType, typeName) + val codec = codecs(typeName) + if (!codec.isInstanceOf[com.harana.macros.mongo.ExistentialCodec[$tpe]]) { + writer.writeName(payloadName) + } + codec.encode(writer, value, encoderContext) + writer.writeEndDocument + } + + override def decode(reader: BsonReader, decoderContext: DecoderContext): $tpe = { + reader.readStartDocument + val typeName = reader.readString(sentinelType) + val codec = codecs(typeName) + if (!codec.isInstanceOf[com.harana.macros.mongo.ExistentialCodec[$tpe]]) { + reader.readName(payloadName) + } + val value = codec.decode(reader, decoderContext) + reader.readEndDocument + value + } + + } + $registry.register(codec) + ${ + val regObjs = objects.map(o => q"$registry.registerFor(codec, ${o.name}.getClass)") + val regCCs = caseClasses.map(cc => q"$registry.registerFor(codec, classOf[$cc])") + + q"""{..$regCCs;..$regObjs}""" + } + codec + }""" + + } + e + } + + def registerCodec[T: c.WeakTypeTag](c: whitebox.Context)(registry: c.Expr[DynamicCodecRegistry]): c.Expr[Codec[T]] = { + import c.universe._ + c.Expr[Codec[T]] { + q""" + val e = ${materializeCodec[T](c)(registry)} + $registry.register(e) + e + """ + } + } + + def materializeCodec[T: c.WeakTypeTag](c: whitebox.Context)(registry: c.Expr[DynamicCodecRegistry]): c.Expr[Codec[T]] = { + import c.universe._ + val tpe = weakTypeOf[T] + if (tpe.typeSymbol.isAbstract) { + // try generation for sealed classes + return forSealedImpl[T](c)(registry) + } + + val constructor = tpe.decls.collectFirst { + case m: MethodSymbol if m.isPrimaryConstructor => m + }.get + + abstract sealed class FieldType { + def tpe: Type + + def codecExpr: Tree + } + case class SimpleField(tpe: Type) extends FieldType { + def codecExpr: Tree = { + q"""$registry.get(classOf[${tpe.typeSymbol}]).asInstanceOf[Codec[Any]]""" + } + } + case class EnumerationField(tpe: Type) extends FieldType { + def codecExpr: Tree = q"""new com.harana.macros.mongo.EnumerationCodec[$tpe]().asInstanceOf[Codec[Any]]""" + } + case class SeqField(inner: FieldType) extends FieldType { + def tpe: c.universe.Type = appliedType(typeOf[Seq[Any]].typeConstructor, List(inner.tpe)) + + def codecExpr: Tree = q"""new com.harana.macros.mongo.SeqCodec(${inner.codecExpr}).asInstanceOf[Codec[Any]]""" + } + case class SetField(inner: FieldType) extends FieldType { + def tpe: c.universe.Type = appliedType(typeOf[Set[Any]].typeConstructor, List(inner.tpe)) + + def codecExpr: Tree = q"""new com.harana.macros.mongo.SetCodec(${inner.codecExpr}).asInstanceOf[Codec[Any]]""" + } + case class ListField(inner: FieldType) extends FieldType { + def tpe: c.universe.Type = appliedType(typeOf[List[Any]].typeConstructor, List(inner.tpe)) + + def codecExpr: Tree = q"""new 
com.harana.macros.mongo.ListCodec(${inner.codecExpr}).asInstanceOf[Codec[Any]]""" + } + case class MapField(key: FieldType, value: FieldType) extends FieldType { + def tpe: c.universe.Type = appliedType(typeOf[Map[Any, Any]].typeConstructor, List(key.tpe, value.tpe)) + + def codecExpr: Tree = q"""new com.harana.macros.mongo.MapCodec[${key.tpe},${value.tpe}](${value.codecExpr}).asInstanceOf[Codec[Any]]""" + } + case class EitherField(innerA: FieldType, innerB: FieldType) extends FieldType { + def tpe: c.universe.Type = appliedType(typeOf[Either[Any, Any]].typeConstructor, List(innerA.tpe, innerB.tpe)) + + def codecExpr: Tree = q"""new com.harana.macros.mongo.EitherCodec(${innerA.codecExpr}, ${innerB.codecExpr}).asInstanceOf[Codec[Any]]""" + } + + object FieldType { + def apply(outer: Type): FieldType = { + val TypeRef(_, _, inner) = outer + if (inner.isEmpty) { + if (outer <:< typeOf[Enumeration#Value]) { + EnumerationField(outer) + } else { + SimpleField(outer) + } + } else { + if (outer.typeConstructor == typeOf[Seq[Any]].typeConstructor) { + SeqField(FieldType(inner.head)) + } else if (outer.typeConstructor == typeOf[Set[Any]].typeConstructor) { + SetField(FieldType(inner.head)) + } else if (outer.typeConstructor == typeOf[List[Any]].typeConstructor) { + ListField(FieldType(inner.head)) + } else if (outer.typeConstructor == typeOf[Either[Any, Any]].typeConstructor) { + EitherField(FieldType(inner.head), FieldType(inner(1))) + } else if (outer.typeConstructor == typeOf[Map[Any, Any]].typeConstructor) { + MapField(FieldType(inner.head), FieldType(inner(1))) + } else { + throw new RuntimeException("Unsupported generic type mapping " + outer) + } + } + } + } + + case class Field(name: Name, option: Boolean, raw: FieldType) + val fields = constructor.paramLists.head.map { field => + val name = field.name.decodedName + val ftpe = tpe.decl(field.name).typeSignature.resultType + val isOption = ftpe.typeConstructor == typeOf[Option[_]].typeConstructor + val deoptioned: Type = if (isOption) { + val TypeRef(_, _, inner) = ftpe + inner.head + } else ftpe + Field(name, isOption, FieldType(deoptioned)) + } + + val e = c.Expr[Codec[T]] { + q"""{ + + val codec = new org.bson.codecs.Codec[$tpe] { + import org.bson._ + import org.bson.codecs._ + + val _codecs = ${ + fields.map { field => + q"""scala.util.Try { + ${field.raw.codecExpr} + }.getOrElse { + throw new RuntimeException("Cannot find Codec for " + ${field.raw.toString} + + " while mapping case class " + ${tpe.typeSymbol.fullName.toString} + ) + } + """ + } + } + + def getEncoderClass():Class[$tpe] = classOf[$tpe] + + def encode(writer: BsonWriter, it:$tpe, encoderContext: EncoderContext) = {${ + q"{..${ + q"""writer.writeStartDocument""" +: + fields.zipWithIndex.flatMap { case (field, ix) => + if (field.option) { + List( + q"""if(it.${field.name.toTermName}.nonEmpty) ${ + q"{..${ + List( + q"""writer.writeName(${Literal(Constant(field.name.toString))})""", + q"""_codecs($ix).encode(writer, it.${field.name.toTermName}.get, encoderContext)""" + ) + }}" + }""" + ) + } else { + List( + q"""writer.writeName(${Literal(Constant(field.name.toString))})""", + q"""_codecs($ix).encode(writer, it.${field.name.toTermName}, encoderContext)""" + ) + } + } :+ + q"""writer.writeEndDocument""" + }}" + }} + + def decode(reader: BsonReader, decoderContext: DecoderContext):$tpe = ${ + q"{..${ + (q"""reader.readStartDocument""" +: + fields.map { f => + q"""var ${f.name.toTermName}:Option[${f.raw.tpe}] = None""" + } :+ + q"""while(reader.readBsonType != 
BsonType.END_OF_DOCUMENT) ${ + Match( + q"""reader.readName()""", + fields.zipWithIndex.map { case (field, ix) => + cq"""${Literal(Constant(field.name.decodedName.toString))} => + ${field.name.toTermName} = Some(_codecs($ix).decode(reader, decoderContext).asInstanceOf[${field.raw.tpe}]) """ + } :+ + cq"""foo => println("Ignore unmapped field `" + foo + "'"); reader.skipValue()""" //FIXME add tests + ) + }""" :+ + q"""reader.readEndDocument""") ++ + fields.filter(!_.option).map { field => + q"""if(${field.name.toTermName}.isEmpty) { + throw new RuntimeException("No value found for required field `" + ${field.name.toTermName.toString} + "'") + }""" + } :+ + q"""new $tpe(${fields.map(f => if (f.option) q"""${f.name.toTermName}""" else q"""${f.name.toTermName}.get"""): _*})""" + }}" + } + } + codec + }""" + } + e + } +} diff --git a/macros/src/main/scala/com/harana/macros/mongo/MongoCodec.scala b/macros/src/main/scala/com/harana/macros/mongo/MongoCodec.scala new file mode 100644 index 0000000..6ed47ad --- /dev/null +++ b/macros/src/main/scala/com/harana/macros/mongo/MongoCodec.scala @@ -0,0 +1,30 @@ +package com.harana.macros.mongo + +import scala.language.experimental.macros +import scala.annotation.StaticAnnotation +import scala.annotation.compileTimeOnly +import scala.reflect.macros.whitebox + +@compileTimeOnly("enable macro paradise to expand macro annotations") +class MongoCodec(anyRef: AnyRef) extends StaticAnnotation { + def macroTransform(annottees: Any*): Any = macro MongoCodecMacro.impl +} + +object MongoCodecMacro { + + def impl(c: whitebox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = { + + import c.universe._ + + val result = annottees map (_.tree) match { + case (classDef @ q"$mods class $tpname[..$tparams] $ctorMods(...$paramss) extends { ..$earlydefns } with ..$parents { $self => ..$stats }") :: _ => + + q""" + $mods class $tpname[..$tparams] $ctorMods(...$paramss) extends { ..$earlydefns } with ..$parents { $self => + ..$stats + } + """ + } + c.Expr[Any](result) + } +} \ No newline at end of file diff --git a/macros/src/main/scala/com/harana/macros/mongo/package.scala b/macros/src/main/scala/com/harana/macros/mongo/package.scala new file mode 100755 index 0000000..271bb99 --- /dev/null +++ b/macros/src/main/scala/com/harana/macros/mongo/package.scala @@ -0,0 +1,42 @@ +package com.harana.macros + +import org.bson._ +import org.bson.codecs._ +import org.bson.codecs.configuration._ + +import scala.reflect.runtime.universe._ +import scala.reflect._ + +package object mongo { + + private val mirror: Mirror = runtimeMirror(getClass.getClassLoader) + + def reflectEnum[T: TypeTag](name: String): T = { + typeOf[T] match { + case TypeRef(enumType, _, _) => + val methodSymbol = enumType.member(TermName("withName")).asMethod + val moduleSymbol = enumType.termSymbol.asModule + reflect(moduleSymbol, methodSymbol)(name).asInstanceOf[T] + } + } + + private def reflect(module: ModuleSymbol, method: MethodSymbol)(args: Any*): Any = { + val moduleMirror = mirror.reflectModule(module) + val instanceMirror = mirror.reflect(moduleMirror.instance) + instanceMirror.reflectMethod(method)(args:_*) + } + + def toDBObject(a: Any)(implicit repo: CodecRegistry): BsonDocument = { + val codec: Encoder[Any] = repo.get(a.getClass).asInstanceOf[Encoder[Any]] + val doc = new BsonDocument() + val writer = new BsonDocumentWriter(doc) + codec.encode(writer, a, EncoderContext.builder().build()) + doc + } + + def fromDBObject[T](doc: BsonDocument)(implicit ct: ClassTag[T], repo: CodecRegistry): T = { + val codec: 
Decoder[Any] = repo.get(ct.runtimeClass).asInstanceOf[Decoder[Any]] + val reader = new BsonDocumentReader(doc) + codec.decode(reader, DecoderContext.builder().build()).asInstanceOf[T] + } +} diff --git a/project/build.properties b/project/build.properties new file mode 100755 index 0000000..8cf07b7 --- /dev/null +++ b/project/build.properties @@ -0,0 +1 @@ +sbt.version=1.9.8 \ No newline at end of file diff --git a/project/plugins.sbt b/project/plugins.sbt new file mode 100755 index 0000000..4841d9c --- /dev/null +++ b/project/plugins.sbt @@ -0,0 +1,2 @@ +resolvers += Resolver.url("harana", url("https://maven.pkg.github.com/harana/sbt-plugin"))(Patterns("[organisation]/[module]/[revision]/[artifact].[ext]") ) +addSbtPlugin("com.harana" % "sbt_js_jvm" % "0.0.0") \ No newline at end of file
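
A minimal usage sketch for the codec machinery above, assuming the macro module is compiled before the code that expands it so the `CodecGen` macros can run. The `User` case class is hypothetical, and the explicit `StringCodec` registration is an assumption: the registry's provided codecs cover numeric, boolean, `Instant`, `UUID` and `ObjectId` values but not `String`, so a String codec must be available before the generated code resolves per-field codecs.

```
import com.harana.macros.mongo._
import org.bson.BsonDocument
import org.bson.codecs.StringCodec

// Hypothetical model, used only to illustrate the generated codecs.
case class User(name: String, age: Int, tags: Set[String])

object CodecExample {
  // Single shared registry; CodecGen registers the generated codec into it.
  implicit val registry: DynamicCodecRegistry = new DynamicCodecRegistry

  // Register a String codec before the generated codec looks up field codecs,
  // since the provided codecs above do not include one.
  registry.register(new StringCodec)

  // Expands at compile time into a Codec[User] and registers it in the registry.
  CodecGen[User](registry)

  def roundTrip(user: User): User = {
    val doc: BsonDocument = toDBObject(user) // encode via the generated codec
    fromDBObject[User](doc)                  // decode back into a User
  }
}
```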